PAL: Fix sh-run
[csit.git] / resources/tools/presentation/generator_tables.py
# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Algorithms to generate tables.
"""


import logging
import csv
import math
import re

from collections import OrderedDict
from xml.etree import ElementTree as ET
from datetime import datetime as dt
from datetime import timedelta
from copy import deepcopy

import plotly.graph_objects as go
import plotly.offline as ploff
import pandas as pd
import prettytable

from numpy import nan, isnan
from yaml import load, FullLoader, YAMLError

from pal_utils import mean, stdev, classify_anomalies, \
    convert_csv_to_pretty_txt, relative_change_stdev, relative_change


REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
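# REGEX_NIC extracts the NIC token from a suite or test name, e.g. it matches
# "10ge2p1x710" in "...-10ge2p1x710-..." (illustrative; any token of the shape
# "<n>ge<n>p<n><nic>" matches).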


def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    generator = {
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison,
        u"table_job_spec_duration": table_job_spec_duration
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            if table[u"algorithm"] == u"table_weekly_comparison":
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            generator[table[u"algorithm"]](table, data)
        except (KeyError, NameError) as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
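
# Illustrative specification entry dispatched by generate_tables(); the key
# names follow what the code below reads, the values here are hypothetical:
#
#   - type: "table"
#     algorithm: "table_job_spec_duration"
#     title: "Job specification duration (iterative jobs)"
#     jb-type: "iterative"
#     output-file: "{DIR[STATIC,VPP]}/job_spec_duration"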


def table_job_spec_duration(table, input_data):
    """Generate the table(s) with algorithm: table_job_spec_duration
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    jb_type = table.get(u"jb-type", None)

    tbl_lst = list()
    if jb_type == u"iterative":
        for line in table.get(u"lines", tuple()):
            tbl_itm = {
                u"name": line.get(u"job-spec", u""),
                u"data": list()
            }
            for job, builds in line.get(u"data-set", dict()).items():
                for build_nr in builds:
                    try:
                        minutes = input_data.metadata(
                            job, str(build_nr)
                        )[u"elapsedtime"] // 60000
                    except (KeyError, IndexError, ValueError, AttributeError):
                        continue
                    tbl_itm[u"data"].append(minutes)
            tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
            tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
            tbl_lst.append(tbl_itm)
    elif jb_type == u"coverage":
        job = table.get(u"data", None)
        if not job:
            return
        for line in table.get(u"lines", tuple()):
            try:
                tbl_itm = {
                    u"name": line.get(u"job-spec", u""),
                    u"mean": input_data.metadata(
                        list(job.keys())[0], str(line[u"build"])
                    )[u"elapsedtime"] // 60000,
                    u"stdev": float(u"nan")
                }
                tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
            except (KeyError, IndexError, ValueError, AttributeError):
                continue
            tbl_lst.append(tbl_itm)
    else:
        logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
        return

    for line in tbl_lst:
        line[u"mean"] = \
            f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
        if math.isnan(line[u"stdev"]):
            line[u"stdev"] = u""
        else:
            line[u"stdev"] = \
                f"{int(line[u'stdev'] // 60):02d}:{int(line[u'stdev'] % 60):02d}"
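    # e.g. a mean of 90 minutes is rendered as "01:30" above; "elapsedtime"
    # comes in milliseconds, hence the // 60000 conversion earlier.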

    if not tbl_lst:
        return

    rows = list()
    for itm in tbl_lst:
        rows.append([
            itm[u"name"],
            f"{len(itm[u'data'])}",
            f"{itm[u'mean']} +- {itm[u'stdev']}"
            if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
        ])

    txt_table = prettytable.PrettyTable(
        [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
    )
    for row in rows:
        txt_table.add_row(row)
    txt_table.align = u"r"
    txt_table.align[u"Job Specification"] = u"l"

    file_name = f"{table.get(u'output-file', u'')}.txt"
    with open(file_name, u"wt", encoding=u"utf-8") as txt_file:
        txt_file.write(str(txt_table))


def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: table_oper_data_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"telemetry-show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        if tst_data.get(u"telemetry-show-run", None) is None or \
                isinstance(tst_data[u"telemetry-show-run"], str):
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        for dut_data in tst_data[u"telemetry-show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"runtime", None) is None:
                tcol.text = u"No Data"
                continue

            runtime = dict()
            for item in dut_data[u"runtime"].get(u"data", tuple()):
                tid = int(item[u"labels"][u"thread_id"])
                if runtime.get(tid, None) is None:
                    runtime[tid] = dict()
                gnode = item[u"labels"][u"graph_node"]
                if runtime[tid].get(gnode, None) is None:
                    runtime[tid][gnode] = dict()
                try:
                    runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
                except ValueError:
                    runtime[tid][gnode][item[u"name"]] = item[u"value"]
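            # Shape inferred from the accesses above; an illustrative item:
            # {u"name": u"calls", u"value": u"1234",
            #  u"labels": {u"thread_id": u"0", u"graph_node": u"ip4-lookup"}}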

            threads = {tid: list() for tid in runtime.keys()}
            for idx, run_data in runtime.items():
                for gnode, gdata in run_data.items():
                    threads[idx].append([
                        gnode,
                        int(gdata[u"calls"]),
                        int(gdata[u"vectors"]),
                        int(gdata[u"suspends"]),
                        float(gdata[u"clocks"]),
                        float(gdata[u"vectors"] / gdata[u"calls"]) \
                            if gdata[u"calls"] else 0.0
                    ])
            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in threads.items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(file_name, u"wt") as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")


def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"status"] != u"PASS" or \
                    data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in the test
                    # message.
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u"\n", 1)[1]
                            except IndexError:
                                pass
                        col_data = col_data.replace(u'\n', u' |br| ').\
                            replace(u'\r', u'').replace(u'"', u"'")
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
                        col_data = col_data.replace(u'\n', u' |br| ')
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")


def _tpc_modify_test_name(test_name, ignore_nic=False):
    """Modify a test name by replacing its parts.

    :param test_name: Test name to be modified.
    :param ignore_nic: If True, the NIC is removed from the test name.
    :type test_name: str
    :type ignore_nic: bool
    :returns: Modified test name.
    :rtype: str
    """
    test_name_mod = test_name.\
        replace(u"-ndrpdr", u"").\
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c").\
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c").\
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")
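
    # e.g. "64b-2t1c-ethip4-ip4base-ndrpdr" -> "64b-1c-ethip4-ip4base"
    # (illustrative; with ignore_nic=True the NIC token, e.g. "10ge2p1x710",
    # is removed from the result as well).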

    if ignore_nic:
        return re.sub(REGEX_NIC, u"", test_name_mod)
    return test_name_mod


def _tpc_modify_displayed_test_name(test_name):
    """Modify a test name which is displayed in a table by replacing its parts.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    return test_name.\
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c").\
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c").\
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")


def _tpc_insert_data(target, src, include_tests):
    """Insert src data into the target structure.

    :param target: Target structure where the data is placed.
    :param src: Source data to be placed into the target structure.
    :param include_tests: Which results are included (MRR, NDR, PDR, latency).
    :type target: dict
    :type src: dict
    :type include_tests: str
    """
    try:
        if include_tests == u"MRR":
            target[u"mean"] = src[u"result"][u"receive-rate"]
            target[u"stdev"] = src[u"result"][u"receive-stdev"]
        elif include_tests == u"PDR":
            target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
        elif include_tests == u"NDR":
            target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
        elif u"latency" in include_tests:
            keys = include_tests.split(u"-")
            if len(keys) == 4:
                lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
                target[u"data"].append(
                    float(u"nan") if lat == -1 else lat * 1e6
                )
    except (KeyError, TypeError):
        pass
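    # Illustrative latency key: include_tests == u"latency-PDR-direction1-avg"
    # (a hypothetical value) would resolve above as
    # src[u"latency"][u"PDR"][u"direction1"][u"avg"].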


def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate an html table from the input data with a simple sorting
    possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True, data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, a wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
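        # df_sorted now holds 2 * len(header) pre-sorted copies of the data,
        # one ascending and one descending variant per column; each becomes a
        # table trace below and the dropdown menu toggles their visibility.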
    else:
        df_sorted = df_data

    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )

        if legend:
            try:
                itm_lst = legend[1:-2].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
        if footnote:
            try:
                itm_lst = footnote[1:].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")


def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test Case",
            f"Avg({table[u'reference'][u'title']})",
            f"Stdev({table[u'reference'][u'title']})",
            f"Avg({table[u'compare'][u'title']})",
            f"Stdev({table[u'compare'][u'title']})",
            u"Diff",
            u"Stdev(Diff)"
        ]
        header_str = u";".join(header) + u"\n"
        legend = (
            u"\nLegend:\n"
            f"Avg({table[u'reference'][u'title']}): "
            f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
            f"from a series of runs of the listed tests.\n"
            f"Stdev({table[u'reference'][u'title']}): "
            f"Standard deviation value of {table[u'reference'][u'title']} "
            f"[Mpps] computed from a series of runs of the listed tests.\n"
            f"Avg({table[u'compare'][u'title']}): "
            f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
            f"a series of runs of the listed tests.\n"
            f"Stdev({table[u'compare'][u'title']}): "
            f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
            f"computed from a series of runs of the listed tests.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Stdev(Diff): "
            u"Standard deviation of percentage change calculated for mean "
            u"values."
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = (tst_data[u"result"][u"receive-rate"],
                                  tst_data[u"result"][u"receive-stdev"])
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        if data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
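            # Illustrative numbers, assuming relative_change_stdev() returns
            # the percentage change of the means (as the legend above states):
            # ref mean 10.0 Mpps vs cmp mean 12.0 Mpps gives delta = +20, with
            # d_stdev the stdev of that change propagated from the inputs.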
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    with open(csv_file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(
        csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
    )
    with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
        file_handler.write(legend)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_lst,
        table[u'output-file'],
        legend=legend,
        title=table.get(u"title", u"")
    )


def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm: table_perf_trending_dash
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data for the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        if len(data_t) < 2:
            continue

        try:
            classification_lst, avgs, _ = classify_anomalies(data_t)
        except ValueError as err:
            logging.info(f"{err} Skipping the table.")
            return

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
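        # e.g. last_avg = 2.1e6, avg_week_ago = 2.0e6 ->
        # rel_change_last = 5.0, i.e. a +5 % short-term change.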

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[2])

    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")


def _generate_url(testbed, test_name):
    """Generate a URL to a trending plot from the name of the test case.

    :param testbed: The testbed used for testing.
    :param test_name: The name of the test case.
    :type testbed: str
    :type test_name: str
    :returns: The URL to the plot with the trending data for the given test
        case.
    :rtype: str
    """

    if u"x520" in test_name:
        nic = u"x520"
    elif u"x710" in test_name:
        nic = u"x710"
    elif u"xl710" in test_name:
        nic = u"xl710"
    elif u"xxv710" in test_name:
        nic = u"xxv710"
    elif u"vic1227" in test_name:
        nic = u"vic1227"
    elif u"vic1385" in test_name:
        nic = u"vic1385"
    elif u"x553" in test_name:
        nic = u"x553"
    elif u"cx556" in test_name:
        nic = u"cx556a"
    else:
        nic = u""

    if u"64b" in test_name:
        frame_size = u"64b"
    elif u"78b" in test_name:
        frame_size = u"78b"
    elif u"imix" in test_name:
        frame_size = u"imix"
    elif u"9000b" in test_name:
        frame_size = u"9000b"
    elif u"1518b" in test_name:
        frame_size = u"1518b"
    elif u"114b" in test_name:
        frame_size = u"114b"
    else:
        frame_size = u""

    if u"1t1c" in test_name or \
        (u"-1c-" in test_name and
         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
        cores = u"1t1c"
    elif u"2t2c" in test_name or \
         (u"-2c-" in test_name and
          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
        cores = u"2t2c"
    elif u"4t4c" in test_name or \
         (u"-4c-" in test_name and
          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
        cores = u"4t4c"
    elif u"2t1c" in test_name or \
         (u"-1c-" in test_name and
          testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
        cores = u"2t1c"
    elif u"4t2c" in test_name or \
         (u"-2c-" in test_name and
          testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
        cores = u"4t2c"
    elif u"8t4c" in test_name or \
         (u"-4c-" in test_name and
          testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
        cores = u"8t4c"
    else:
        cores = u""

    if u"testpmd" in test_name:
        driver = u"testpmd"
    elif u"l3fwd" in test_name:
        driver = u"l3fwd"
    elif u"avf" in test_name:
        driver = u"avf"
    elif u"rdma" in test_name:
        driver = u"rdma"
    elif u"dnv" in testbed or u"tsh" in testbed:
        driver = u"ixgbe"
    else:
        driver = u"dpdk"

    if u"macip-iacl1s" in test_name:
        bsf = u"features-macip-iacl1"
    elif u"macip-iacl10s" in test_name:
        bsf = u"features-macip-iacl10"
    elif u"macip-iacl50s" in test_name:
        bsf = u"features-macip-iacl50"
    elif u"iacl1s" in test_name:
        bsf = u"features-iacl1"
    elif u"iacl10s" in test_name:
        bsf = u"features-iacl10"
    elif u"iacl50s" in test_name:
        bsf = u"features-iacl50"
    elif u"oacl1s" in test_name:
        bsf = u"features-oacl1"
    elif u"oacl10s" in test_name:
        bsf = u"features-oacl10"
    elif u"oacl50s" in test_name:
        bsf = u"features-oacl50"
    elif u"nat44det" in test_name:
        bsf = u"nat44det-bidir"
    elif u"nat44ed" in test_name and u"udir" in test_name:
        bsf = u"nat44ed-udir"
    elif u"-cps" in test_name and u"ethip4udp" in test_name:
        bsf = u"udp-cps"
    elif u"-cps" in test_name and u"ethip4tcp" in test_name:
        bsf = u"tcp-cps"
    elif u"-pps" in test_name and u"ethip4udp" in test_name:
        bsf = u"udp-pps"
    elif u"-pps" in test_name and u"ethip4tcp" in test_name:
        bsf = u"tcp-pps"
    elif u"-tput" in test_name and u"ethip4udp" in test_name:
        bsf = u"udp-tput"
    elif u"-tput" in test_name and u"ethip4tcp" in test_name:
        bsf = u"tcp-tput"
    elif u"udpsrcscale" in test_name:
        bsf = u"features-udp"
    elif u"iacl" in test_name or u"policer" in test_name or \
            u"adl" in test_name or u"cop" in test_name or \
            u"nat" in test_name or u"macip" in test_name:
        bsf = u"features"
    elif u"scale" in test_name:
        bsf = u"scale"
    else:
        bsf = u"base"

    if u"114b" in test_name and u"vhost" in test_name:
        domain = u"vts"
    elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
        domain = u"nat44"
        if u"nat44det" in test_name:
            domain += u"-det-bidir"
        else:
            domain += u"-ed"
        if u"udir" in test_name:
            domain += u"-unidir"
        elif u"-ethip4udp-" in test_name:
            domain += u"-udp"
        elif u"-ethip4tcp-" in test_name:
            domain += u"-tcp"
        if u"-cps" in test_name:
            domain += u"-cps"
        elif u"-pps" in test_name:
            domain += u"-pps"
        elif u"-tput" in test_name:
            domain += u"-tput"
    elif u"testpmd" in test_name or u"l3fwd" in test_name:
        domain = u"dpdk"
    elif u"memif" in test_name:
        domain = u"container_memif"
    elif u"srv6" in test_name:
        domain = u"srv6"
    elif u"vhost" in test_name:
        domain = u"vhost"
        if u"vppl2xc" in test_name:
            driver += u"-vpp"
        else:
            driver += u"-testpmd"
        if u"lbvpplacp" in test_name:
            bsf += u"-link-bonding"
    elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
        domain = u"nf_service_density_vnfc"
    elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfc"
    elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfp"
    elif u"ipsec" in test_name:
        domain = u"ipsec"
        if u"sw" in test_name:
            bsf += u"-sw"
        elif u"hw" in test_name:
            bsf += u"-hw"
    elif u"ethip4vxlan" in test_name:
        domain = u"ip4_tunnels"
    elif u"ethip4udpgeneve" in test_name:
        domain = u"ip4_tunnels"
    elif u"ip4base" in test_name or u"ip4scale" in test_name:
        domain = u"ip4"
    elif u"ip6base" in test_name or u"ip6scale" in test_name:
        domain = u"ip6"
    elif u"l2xcbase" in test_name or \
            u"l2xcscale" in test_name or \
            u"l2bdbasemaclrn" in test_name or \
            u"l2bdscale" in test_name or \
            u"l2patch" in test_name:
        domain = u"l2"
    else:
        domain = u""

    file_name = u"-".join((domain, testbed, nic)) + u".html#"
    anchor_name = u"-".join((frame_size, cores, bsf, driver))
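
    # Illustrative result for the test name
    # u"xxv710-64b-2t1c-avf-ethip4-ip4base-mrr" on testbed u"2n-skx":
    # u"ip4-2n-skx-xxv710.html#64b-2t1c-base-avf".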

    return file_name + anchor_name


def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm: table_perf_trending_dash_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    if test_type in (u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_suffix = f"-{test_type.lower()}"
    else:
        lnk_dir = u"../trending/"
        lnk_suffix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except FileNotFoundError as err:
        logging.warning(f"{err}")
        return
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
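    # Each classification has two shades; rows alternate between them
    # (r_idx % 2 below) so consecutive rows of the same classification stay
    # visually distinguishable.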
1360     for r_idx, row in enumerate(csv_lst[1:]):
1361         if int(row[4]):
1362             color = u"regression"
1363         elif int(row[5]):
1364             color = u"progression"
1365         else:
1366             color = u"normal"
1367         trow = ET.SubElement(
1368             dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1369         )
1370
1371         # Columns:
1372         for c_idx, item in enumerate(row):
1373             tdata = ET.SubElement(
1374                 trow,
1375                 u"td",
1376                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
1377             )
1378             # Name:
1379             if c_idx == 0 and table.get(u"add-links", True):
1380                 ref = ET.SubElement(
1381                     tdata,
1382                     u"a",
1383                     attrib=dict(
1384                         href=f"{lnk_dir}"
1385                         f"{_generate_url(table.get(u'testbed', ''), item)}"
1386                         f"{lnk_sufix}"
1387                     )
1388                 )
1389                 ref.text = item
1390             else:
1391                 tdata.text = item
1392     try:
1393         with open(table[u"output-file"], u'w') as html_file:
1394             logging.info(f"    Writing file: {table[u'output-file']}")
1395             html_file.write(u".. raw:: html\n\n\t")
1396             html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1397             html_file.write(u"\n\t<p><br><br></p>\n")
1398     except KeyError:
1399         logging.warning(u"The output file is not defined.")
1400         return
1401
1402
1403 def table_last_failed_tests(table, input_data):
1404     """Generate the table(s) with algorithm: table_last_failed_tests
1405     specified in the specification file.
1406
1407     :param table: Table to generate.
1408     :param input_data: Data to process.
1409     :type table: pandas.Series
1410     :type input_data: InputData
1411     """
1412
1413     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1414
1415     # Transform the data
1416     logging.info(
1417         f"    Creating the data set for the {table.get(u'type', u'')} "
1418         f"{table.get(u'title', u'')}."
1419     )
1420
1421     data = input_data.filter_data(table, continue_on_error=True)
1422
1423     if data is None or data.empty:
1424         logging.warning(
1425             f"    No data for the {table.get(u'type', u'')} "
1426             f"{table.get(u'title', u'')}."
1427         )
1428         return
1429
1430     tbl_list = list()
1431     for job, builds in table[u"data"].items():
1432         for build in builds:
1433             build = str(build)
1434             try:
1435                 version = input_data.metadata(job, build).get(u"version", u"")
1436                 duration = \
1437                     input_data.metadata(job, build).get(u"elapsedtime", u"")
1438             except KeyError:
1439                 logging.error(f"Data for {job}: {build} is not present.")
1440                 return
1441             tbl_list.append(build)
1442             tbl_list.append(version)
1443             failed_tests = list()
1444             passed = 0
1445             failed = 0
1446             for tst_data in data[job][build].values:
1447                 if tst_data[u"status"] != u"FAIL":
1448                     passed += 1
1449                     continue
1450                 failed += 1
1451                 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1452                 if not groups:
1453                     continue
1454                 nic = groups.group(0)
1455                 msg = tst_data[u'msg'].replace(u"\n", u"")
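                     # Anonymise IPv4 addresses in the failure message.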
1456                 msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
1457                              u'xxx.xxx.xxx.xxx', msg)
1458                 msg = msg.split(u'Also teardown failed')[0]
1459                 failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
1460             tbl_list.append(passed)
1461             tbl_list.append(failed)
1462             tbl_list.append(duration)
1463             tbl_list.extend(failed_tests)
1464
1465     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1466     logging.info(f"    Writing file: {file_name}")
1467     with open(file_name, u"wt") as file_handler:
1468         for test in tbl_list:
1469             file_handler.write(f"{test}\n")
1470
1471
1472 def table_failed_tests(table, input_data):
1473     """Generate the table(s) with algorithm: table_failed_tests
1474     specified in the specification file.
1475
1476     :param table: Table to generate.
1477     :param input_data: Data to process.
1478     :type table: pandas.Series
1479     :type input_data: InputData
1480     """
1481
1482     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1483
1484     # Transform the data
1485     logging.info(
1486         f"    Creating the data set for the {table.get(u'type', u'')} "
1487         f"{table.get(u'title', u'')}."
1488     )
1489     data = input_data.filter_data(table, continue_on_error=True)
1490
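         # The filtered data set can be empty, e.g. if no builds could be
         # processed; in that case there is nothing to tabulate.
         if data is None or data.empty:
             logging.warning(
                 f"    No data for the {table.get(u'type', u'')} "
                 f"{table.get(u'title', u'')}."
             )
             return
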
1491     test_type = u"MRR"
1492     if u"NDRPDR" in table.get(u"filter", list()):
1493         test_type = u"NDRPDR"
1494
1495     # Prepare the header of the tables
1496     header = [
1497         u"Test Case",
1498         u"Failures [#]",
1499         u"Last Failure [Time]",
1500         u"Last Failure [VPP-Build-Id]",
1501         u"Last Failure [CSIT-Job-Build-Id]"
1502     ]
1503
1504     # Generate the data for the table according to the model in the table
1505     # specification
1506
1507     now = dt.utcnow()
1508     timeperiod = timedelta(int(table.get(u"window", 7)))
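     # Only failures in builds generated within this time window (given in
     # days) are counted.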
1509
1510     tbl_dict = dict()
1511     for job, builds in table[u"data"].items():
1512         for build in builds:
1513             build = str(build)
1514             for tst_name, tst_data in data[job][build].items():
1515                 if tst_name.lower() in table.get(u"ignore-list", list()):
1516                     continue
1517                 if tbl_dict.get(tst_name, None) is None:
1518                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1519                     if not groups:
1520                         continue
1521                     nic = groups.group(0)
1522                     tbl_dict[tst_name] = {
1523                         u"name": f"{nic}-{tst_data[u'name']}",
1524                         u"data": OrderedDict()
1525                     }
1526                 try:
1527                     generated = input_data.metadata(job, build).\
1528                         get(u"generated", u"")
1529                     if not generated:
1530                         continue
1531                     then = dt.strptime(generated, u"%Y%m%d %H:%M")
1532                     if (now - then) <= timeperiod:
1533                         tbl_dict[tst_name][u"data"][build] = (
1534                             tst_data[u"status"],
1535                             generated,
1536                             input_data.metadata(job, build).get(u"version",
1537                                                                 u""),
1538                             build
1539                         )
1540                 except (TypeError, KeyError) as err:
1541                     logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
1542
1543     max_fails = 0
1544     tbl_lst = list()
1545     for tst_data in tbl_dict.values():
1546         fails_nr = 0
1547         fails_last_date = u""
1548         fails_last_vpp = u""
1549         fails_last_csit = u""
1550         for val in tst_data[u"data"].values():
1551             if val[0] == u"FAIL":
1552                 fails_nr += 1
1553                 fails_last_date = val[1]
1554                 fails_last_vpp = val[2]
1555                 fails_last_csit = val[3]
1556         if fails_nr:
1557             max_fails = max(max_fails, fails_nr)
1558             tbl_lst.append([
1559                 tst_data[u"name"],
1560                 fails_nr,
1561                 fails_last_date,
1562                 fails_last_vpp,
1563                 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1564                 f"-build-{fails_last_csit}"
1565             ])
1566
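     # Sort by the date of the last failure (newest first), then group the
     # rows by the number of failures (highest first).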
1567     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1568     tbl_sorted = list()
1569     for nrf in range(max_fails, -1, -1):
1570         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1571         tbl_sorted.extend(tbl_fails)
1572
1573     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1574     logging.info(f"    Writing file: {file_name}")
1575     with open(file_name, u"wt") as file_handler:
1576         file_handler.write(u",".join(header) + u"\n")
1577         for test in tbl_sorted:
1578             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1579
1580     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1581     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1582
1583
1584 def table_failed_tests_html(table, input_data):
1585     """Generate the table(s) with algorithm: table_failed_tests_html
1586     specified in the specification file.
1587
1588     :param table: Table to generate.
1589     :param input_data: Data to process.
1590     :type table: pandas.Series
1591     :type input_data: InputData
1592     """
1593
1594     _ = input_data
1595
1596     if not table.get(u"testbed", None):
1597         logging.error(
1598             f"The testbed is not defined for the table "
1599             f"{table.get(u'title', u'')}. Skipping."
1600         )
1601         return
1602
1603     test_type = table.get(u"test-type", u"MRR")
1604     if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1605         logging.error(
1606             f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
1607             f"Skipping."
1608         )
1609         return
1610
1611     if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1612         lnk_dir = u"../ndrpdr_trending/"
1613         lnk_sufix = u"-pdr"
1614     else:
1615         lnk_dir = u"../trending/"
1616         lnk_sufix = u""
1617
1618     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1619
1620     try:
1621         with open(table[u"input-file"], u'rt') as csv_file:
1622             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1623     except KeyError:
1624         logging.warning(u"The input file is not defined.")
1625         return
1626     except csv.Error as err:
1627         logging.warning(
1628             f"Not possible to process the file {table[u'input-file']}.\n"
1629             f"{repr(err)}"
1630         )
1631         return
1632
1633     # Table:
1634     failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1635
1636     # Table header:
1637     trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1638     for idx, item in enumerate(csv_lst[0]):
1639         alignment = u"left" if idx == 0 else u"center"
1640         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
1641         thead.text = item
1642
1643     # Rows:
1644     colors = (u"#e9f1fb", u"#d4e4f7")
1645     for r_idx, row in enumerate(csv_lst[1:]):
1646         background = colors[r_idx % 2]
1647         trow = ET.SubElement(
1648             failed_tests, u"tr", attrib=dict(bgcolor=background)
1649         )
1650
1651         # Columns:
1652         for c_idx, item in enumerate(row):
1653             tdata = ET.SubElement(
1654                 trow,
1655                 u"td",
1656                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
1657             )
1658             # Name:
1659             if c_idx == 0 and table.get(u"add-links", True):
1660                 ref = ET.SubElement(
1661                     tdata,
1662                     u"a",
1663                     attrib=dict(
1664                         href=f"{lnk_dir}"
1665                         f"{_generate_url(table.get(u'testbed', ''), item)}"
1666                         f"{lnk_sufix}"
1667                     )
1668                 )
1669                 ref.text = item
1670             else:
1671                 tdata.text = item
1672     try:
1673         with open(table[u"output-file"], u'w') as html_file:
1674             logging.info(f"    Writing file: {table[u'output-file']}")
1675             html_file.write(u".. raw:: html\n\n\t")
1676             html_file.write(ET.tostring(failed_tests, encoding=u"unicode"))
1677             html_file.write(u"\n\t<p><br><br></p>\n")
1678     except KeyError:
1679         logging.warning(u"The output file is not defined.")
1680         return
1681
1682
1683 def table_comparison(table, input_data):
1684     """Generate the table(s) with algorithm: table_comparison
1685     specified in the specification file.
1686
1687     :param table: Table to generate.
1688     :param input_data: Data to process.
1689     :type table: pandas.Series
1690     :type input_data: InputData
1691     """
1692     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1693
1694     # Transform the data
1695     logging.info(
1696         f"    Creating the data set for the {table.get(u'type', u'')} "
1697         f"{table.get(u'title', u'')}."
1698     )
1699
1700     columns = table.get(u"columns", None)
1701     if not columns:
1702         logging.error(
1703             f"No columns specified for {table.get(u'title', u'')}. Skipping."
1704         )
1705         return
1706
1707     cols = list()
1708     for idx, col in enumerate(columns):
1709         if col.get(u"data-set", None) is None:
1710             logging.warning(f"No data for column {col.get(u'title', u'')}")
1711             continue
1712         tag = col.get(u"tag", None)
1713         data = input_data.filter_data(
1714             table,
1715             params=[
1716                 u"throughput",
1717                 u"result",
1718                 u"latency",
1719                 u"name",
1720                 u"parent",
1721                 u"tags"
1722             ],
1723             data=col[u"data-set"],
1724             continue_on_error=True
1725         )
1726         col_data = {
1727             u"title": col.get(u"title", f"Column{idx}"),
1728             u"data": dict()
1729         }
1730         for builds in data.values:
1731             for build in builds:
1732                 for tst_name, tst_data in build.items():
1733                     if tag and tag not in tst_data[u"tags"]:
1734                         continue
1735                     tst_name_mod = \
1736                         _tpc_modify_test_name(tst_name, ignore_nic=True).\
1737                         replace(u"2n1l-", u"")
1738                     if col_data[u"data"].get(tst_name_mod, None) is None:
1739                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
1740                         if u"across testbeds" in table[u"title"].lower() or \
1741                                 u"across topologies" in table[u"title"].lower():
1742                             name = _tpc_modify_displayed_test_name(name)
1743                         col_data[u"data"][tst_name_mod] = {
1744                             u"name": name,
1745                             u"replace": True,
1746                             u"data": list(),
1747                             u"mean": None,
1748                             u"stdev": None
1749                         }
1750                     _tpc_insert_data(
1751                         target=col_data[u"data"][tst_name_mod],
1752                         src=tst_data,
1753                         include_tests=table[u"include-tests"]
1754                     )
1755
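             # Data from the optional replacement data set overrides the
             # data collected above for tests present in both sets; tests
             # present only in the replacement set are added.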
1756         replacement = col.get(u"data-replacement", None)
1757         if replacement:
1758             rpl_data = input_data.filter_data(
1759                 table,
1760                 params=[
1761                     u"throughput",
1762                     u"result",
1763                     u"latency",
1764                     u"name",
1765                     u"parent",
1766                     u"tags"
1767                 ],
1768                 data=replacement,
1769                 continue_on_error=True
1770             )
1771             for builds in rpl_data.values:
1772                 for build in builds:
1773                     for tst_name, tst_data in build.items():
1774                         if tag and tag not in tst_data[u"tags"]:
1775                             continue
1776                         tst_name_mod = \
1777                             _tpc_modify_test_name(tst_name, ignore_nic=True).\
1778                             replace(u"2n1l-", u"")
1779                         if col_data[u"data"].get(tst_name_mod, None) is None:
1780                             name = tst_data[u'name'].rsplit(u'-', 1)[0]
1781                             if u"across testbeds" in table[u"title"].lower() \
1782                                     or u"across topologies" in \
1783                                     table[u"title"].lower():
1784                                 name = _tpc_modify_displayed_test_name(name)
1785                             col_data[u"data"][tst_name_mod] = {
1786                                 u"name": name,
1787                                 u"replace": False,
1788                                 u"data": list(),
1789                                 u"mean": None,
1790                                 u"stdev": None
1791                             }
1792                         if col_data[u"data"][tst_name_mod][u"replace"]:
1793                             col_data[u"data"][tst_name_mod][u"replace"] = False
1794                             col_data[u"data"][tst_name_mod][u"data"] = list()
1795                         _tpc_insert_data(
1796                             target=col_data[u"data"][tst_name_mod],
1797                             src=tst_data,
1798                             include_tests=table[u"include-tests"]
1799                         )
1800
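             # Compute the mean and the standard deviation from the
             # collected samples.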
1801         if table[u"include-tests"] in (u"NDR", u"PDR") or \
1802                 u"latency" in table[u"include-tests"]:
1803             for tst_name, tst_data in col_data[u"data"].items():
1804                 if tst_data[u"data"]:
1805                     tst_data[u"mean"] = mean(tst_data[u"data"])
1806                     tst_data[u"stdev"] = stdev(tst_data[u"data"])
1807
1808         cols.append(col_data)
1809
1810     tbl_dict = dict()
1811     for col in cols:
1812         for tst_name, tst_data in col[u"data"].items():
1813             if tbl_dict.get(tst_name, None) is None:
1814                 tbl_dict[tst_name] = {
1815                     "name": tst_data[u"name"]
1816                 }
1817             tbl_dict[tst_name][col[u"title"]] = {
1818                 u"mean": tst_data[u"mean"],
1819                 u"stdev": tst_data[u"stdev"]
1820             }
1821
1822     if not tbl_dict:
1823         logging.warning(f"No data for table {table.get(u'title', u'')}!")
1824         return
1825
1826     tbl_lst = list()
1827     for tst_data in tbl_dict.values():
1828         row = [tst_data[u"name"], ]
1829         for col in cols:
1830             row.append(tst_data.get(col[u"title"], None))
1831         tbl_lst.append(row)
1832
1833     comparisons = table.get(u"comparisons", None)
1834     rcas = list()
1835     if comparisons and isinstance(comparisons, list):
1836         for idx, comp in enumerate(list(comparisons)):
1837             try:
1838                 col_ref = int(comp[u"reference"])
1839                 col_cmp = int(comp[u"compare"])
1840             except KeyError:
1841                 logging.warning(u"Comparison: No references defined! Skipping.")
1842                 comparisons.remove(comp)
1843                 continue
1844             if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols)) \
1845                     or col_ref == col_cmp:
1846                 logging.warning(f"Wrong values of reference={col_ref} "
1847                                 f"and/or compare={col_cmp}. Skipping.")
1848                 comparisons.remove(comp)
1849                 continue
1850             rca_file_name = comp.get(u"rca-file", None)
1851             if rca_file_name:
1852                 try:
1853                     with open(rca_file_name, u"r") as file_handler:
1854                         rcas.append(
1855                             {
1856                                 u"title": f"RCA{idx + 1}",
1857                                 u"data": load(file_handler, Loader=FullLoader)
1858                             }
1859                         )
1860                 except (YAMLError, IOError) as err:
1861                     logging.warning(
1862                         f"The RCA file {rca_file_name} does not exist or "
1863                         f"it is corrupted!"
1864                     )
1865                     logging.debug(repr(err))
1866                     rcas.append(None)
1867             else:
1868                 rcas.append(None)
1869     else:
1870         comparisons = list()
1871
1872     tbl_cmp_lst = list()
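     # A row enters the comparison table only if all requested deltas can
     # be computed; the for-else construct drops rows with incomplete data.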
1873     if comparisons:
1874         for row in tbl_lst:
1875             new_row = deepcopy(row)
1876             for comp in comparisons:
1877                 ref_itm = row[int(comp[u"reference"])]
1878                 if ref_itm is None and \
1879                         comp.get(u"reference-alt", None) is not None:
1880                     ref_itm = row[int(comp[u"reference-alt"])]
1881                 cmp_itm = row[int(comp[u"compare"])]
1882                 if ref_itm is not None and cmp_itm is not None and \
1883                         ref_itm[u"mean"] is not None and \
1884                         cmp_itm[u"mean"] is not None and \
1885                         ref_itm[u"stdev"] is not None and \
1886                         cmp_itm[u"stdev"] is not None:
1887                     try:
1888                         delta, d_stdev = relative_change_stdev(
1889                             ref_itm[u"mean"], cmp_itm[u"mean"],
1890                             ref_itm[u"stdev"], cmp_itm[u"stdev"]
1891                         )
1892                     except ZeroDivisionError:
1893                         break
1894                     if delta is None or math.isnan(delta):
1895                         break
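                         # Scale the delta by 1e6 so it shares the /1e6
                         # formatting applied to the throughput columns.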
1896                     new_row.append({
1897                         u"mean": delta * 1e6,
1898                         u"stdev": d_stdev * 1e6
1899                     })
1900                 else:
1901                     break
1902             else:
1903                 tbl_cmp_lst.append(new_row)
1904
1905     try:
1906         tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1907         tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1908     except TypeError as err:
1909         logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
1910
1911     tbl_for_csv = list()
1912     for line in tbl_cmp_lst:
1913         row = [line[0], ]
1914         for idx, itm in enumerate(line[1:]):
1915             if itm is None or not isinstance(itm, dict) or \
1916                     itm.get(u'mean', None) is None or \
1917                     itm.get(u'stdev', None) is None:
1918                 row.append(u"NT")
1919                 row.append(u"NT")
1920             else:
1921                 row.append(round(float(itm[u'mean']) / 1e6, 3))
1922                 row.append(round(float(itm[u'stdev']) / 1e6, 3))
1923         for rca in rcas:
1924             if rca is None:
1925                 continue
1926             rca_nr = rca[u"data"].get(row[0], u"-")
1927             row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1928         tbl_for_csv.append(row)
1929
1930     header_csv = [u"Test Case", ]
1931     for col in cols:
1932         header_csv.append(f"Avg({col[u'title']})")
1933         header_csv.append(f"Stdev({col[u'title']})")
1934     for comp in comparisons:
1935         header_csv.append(
1936             f"Avg({comp.get(u'title', u'')})"
1937         )
1938         header_csv.append(
1939             f"Stdev({comp.get(u'title', u'')})"
1940         )
1941     for rca in rcas:
1942         if rca:
1943             header_csv.append(rca[u"title"])
1944
1945     legend_lst = table.get(u"legend", None)
1946     if legend_lst is None:
1947         legend = u""
1948     else:
1949         legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1950
1951     footnote = u""
1952     if rcas and any(rcas):
1953         footnote += u"\nRoot Cause Analysis:\n"
1954         for rca in rcas:
1955             if rca:
1956                 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
1957
1958     csv_file_name = f"{table[u'output-file']}-csv.csv"
1959     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1960         file_handler.write(
1961             u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1962         )
1963         for test in tbl_for_csv:
1964             file_handler.write(
1965                 u",".join([f'"{item}"' for item in test]) + u"\n"
1966             )
1967         if legend_lst:
1968             for item in legend_lst:
1969                 file_handler.write(f'"{item}"\n')
1970         if footnote:
1971             for itm in footnote.split(u"\n"):
1972                 file_handler.write(f'"{itm}"\n')
1973
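         # Without any comparable rows there is nothing to format for the
         # txt and html outputs.
         if not tbl_cmp_lst:
             logging.warning(f"No data for table {table.get(u'title', u'')}!")
             return
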
1974     tbl_tmp = list()
1975     max_lens = [0, ] * len(tbl_cmp_lst[0])
1976     for line in tbl_cmp_lst:
1977         row = [line[0], ]
1978         for idx, itm in enumerate(line[1:]):
1979             if itm is None or not isinstance(itm, dict) or \
1980                     itm.get(u'mean', None) is None or \
1981                     itm.get(u'stdev', None) is None:
1982                 new_itm = u"NT"
1983             else:
1984                 if idx < len(cols):
1985                     new_itm = (
1986                         f"{round(float(itm[u'mean']) / 1e6, 1)} "
1987                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1988                         replace(u"nan", u"NaN")
1989                     )
1990                 else:
1991                     new_itm = (
1992                         f"{round(float(itm[u'mean']) / 1e6, 1):+} "
1993                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1994                         replace(u"nan", u"NaN")
1995                     )
1996             if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
1997                 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
1998             row.append(new_itm)
1999
2000         tbl_tmp.append(row)
2001
2002     header = [u"Test Case", ]
2003     header.extend([col[u"title"] for col in cols])
2004     header.extend([comp.get(u"title", u"") for comp in comparisons])
2005
2006     tbl_final = list()
2007     for line in tbl_tmp:
2008         row = [line[0], ]
2009         for idx, itm in enumerate(line[1:]):
2010             if itm in (u"NT", u"NaN"):
2011                 row.append(itm)
2012                 continue
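                 # Right-align the part after the +/- sign to the widest
                 # value found in the column.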
2013             itm_lst = itm.rsplit(u"\u00B1", 1)
2014             itm_lst[-1] = \
2015                 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
2016             itm_str = u"\u00B1".join(itm_lst)
2017
2018             if idx >= len(cols):
2019                 # Diffs
2020                 rca = rcas[idx - len(cols)]
2021                 if rca:
2022                     # Add rcas to diffs
2023                     rca_nr = rca[u"data"].get(row[0], None)
2024                     if rca_nr:
2025                         hdr_len = len(header[idx + 1]) - 1
2026                         if hdr_len < 19:
2027                             hdr_len = 19
2028                         rca_nr = f"[{rca_nr}]"
2029                         itm_str = (
2030                             f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
2031                             f"{u' ' * (hdr_len - 4 - len(itm_str))}"
2032                             f"{itm_str}"
2033                         )
2034             row.append(itm_str)
2035         tbl_final.append(row)
2036
2037     # Generate csv table:
2038     csv_file_name = f"{table[u'output-file']}.csv"
2039     logging.info(f"    Writing the file {csv_file_name}")
2040     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2041         file_handler.write(u";".join(header) + u"\n")
2042         for test in tbl_final:
2043             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2044
2045     # Generate txt table:
2046     txt_file_name = f"{table[u'output-file']}.txt"
2047     logging.info(f"    Writing the file {txt_file_name}")
2048     convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
2049
2050     with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
2051         file_handler.write(legend)
2052         file_handler.write(footnote)
2053
2054     # Generate html table:
2055     _tpc_generate_html_table(
2056         header,
2057         tbl_final,
2058         table[u'output-file'],
2059         legend=legend,
2060         footnote=footnote,
2061         sort_data=False,
2062         title=table.get(u"title", u"")
2063     )
2064
2065
2066 def table_weekly_comparison(table, in_data):
2067     """Generate the table(s) with algorithm: table_weekly_comparison
2068     specified in the specification file.
2069
2070     :param table: Table to generate.
2071     :param in_data: Data to process.
2072     :type table: pandas.Series
2073     :type in_data: InputData
2074     """
2075     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2076
2077     # Transform the data
2078     logging.info(
2079         f"    Creating the data set for the {table.get(u'type', u'')} "
2080         f"{table.get(u'title', u'')}."
2081     )
2082
2083     incl_tests = table.get(u"include-tests", None)
2084     if incl_tests not in (u"NDR", u"PDR"):
2085         logging.error(f"Wrong tests to include specified ({incl_tests}).")
2086         return
2087
2088     nr_cols = table.get(u"nr-of-data-columns", None)
2089     if not nr_cols or nr_cols < 2:
2090         logging.error(
2091             f"No columns specified for {table.get(u'title', u'')}. Skipping."
2092         )
2093         return
2094
2095     data = in_data.filter_data(
2096         table,
2097         params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2098         continue_on_error=True
2099     )
2100
2101     header = [
2102         [u"VPP Version", ],
2103         [u"Start Timestamp", ],
2104         [u"CSIT Build", ],
2105         [u"CSIT Testbed", ]
2106     ]
2107     tbl_dict = dict()
2108     idx = 0
2109     tb_tbl = table.get(u"testbeds", None)
2110     for job_name, job_data in data.items():
2111         for build_nr, build in job_data.items():
2112             if idx >= nr_cols:
2113                 break
2114             if build.empty:
2115                 continue
2116
2117             tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2118             if tb_ip and tb_tbl:
2119                 testbed = tb_tbl.get(tb_ip, u"")
2120             else:
2121                 testbed = u""
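                 # Insert at index 1 so that later builds occupy the
                 # left-most data columns; the per-test data below uses the
                 # matching negative keys (-idx - 1).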
2122             header[2].insert(1, build_nr)
2123             header[3].insert(1, testbed)
2124             header[1].insert(
2125                 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2126             )
2127             header[0].insert(
2128                 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2129             )
2130
2131             for tst_name, tst_data in build.items():
2132                 tst_name_mod = \
2133                     _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2134                 if not tbl_dict.get(tst_name_mod, None):
2135                     tbl_dict[tst_name_mod] = dict(
2136                         name=tst_data[u'name'].rsplit(u'-', 1)[0],
2137                     )
2138                 try:
2139                     tbl_dict[tst_name_mod][-idx - 1] = \
2140                         tst_data[u"throughput"][incl_tests][u"LOWER"]
2141                 except (TypeError, IndexError, KeyError, ValueError):
2142                     pass
2143             idx += 1
2144
2145     if idx < nr_cols:
2146         logging.error(u"Not enough data to build the table! Skipping")
2147         return
2148
2149     cmp_dict = dict()
2150     for idx, cmp in enumerate(table.get(u"comparisons", list())):
2151         idx_ref = cmp.get(u"reference", None)
2152         idx_cmp = cmp.get(u"compare", None)
2153         if idx_ref is None or idx_cmp is None:
2154             continue
2155         header[0].append(
2156             f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2157             f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2158         )
2159         header[1].append(u"")
2160         header[2].append(u"")
2161         header[3].append(u"")
2162         for tst_name, tst_data in tbl_dict.items():
2163             if not cmp_dict.get(tst_name, None):
2164                 cmp_dict[tst_name] = list()
2165             ref_data = tst_data.get(idx_ref, None)
2166             cmp_data = tst_data.get(idx_cmp, None)
2167             if ref_data is None or cmp_data is None:
2168                 cmp_dict[tst_name].append(float(u'nan'))
2169             else:
2170                 cmp_dict[tst_name].append(
2171                     relative_change(ref_data, cmp_data)
2172                 )
2173
2174     tbl_lst_none = list()
2175     tbl_lst = list()
2176     for tst_name, tst_data in tbl_dict.items():
2177         itm_lst = [tst_data[u"name"], ]
2178         for idx in range(nr_cols):
2179             item = tst_data.get(-idx - 1, None)
2180             if item is None:
2181                 itm_lst.insert(1, None)
2182             else:
2183                 itm_lst.insert(1, round(item / 1e6, 1))
2184         itm_lst.extend(
2185             [
2186                 None if itm is None else round(itm, 1)
2187                 for itm in cmp_dict[tst_name]
2188             ]
2189         )
2190         if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2191             tbl_lst_none.append(itm_lst)
2192         else:
2193             tbl_lst.append(itm_lst)
2194
2195     tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2196     tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2197     tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2198     tbl_lst.extend(tbl_lst_none)
2199
2200     # Generate csv table:
2201     csv_file_name = f"{table[u'output-file']}.csv"
2202     logging.info(f"    Writing the file {csv_file_name}")
2203     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2204         for hdr in header:
2205             file_handler.write(u",".join(hdr) + u"\n")
2206         for test in tbl_lst:
2207             file_handler.write(u",".join(
2208                 [
2209                     str(item).replace(u"None", u"-").replace(u"nan", u"-").
2210                     replace(u"null", u"-") for item in test
2211                 ]
2212             ) + u"\n")
2213
2214     txt_file_name = f"{table[u'output-file']}.txt"
2215     logging.info(f"    Writing the file {txt_file_name}")
2216     convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2217
2218     # Reorganize the txt table: move the separator below the header rows.
2219     txt_table = list()
2220     with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2221         for line in list(file_handler):
2222             txt_table.append(line)
2223     try:
2224         txt_table.insert(5, txt_table.pop(2))
2225         with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2226             file_handler.writelines(txt_table)
2227     except IndexError:
2228         pass
2229
2230     # Generate html table:
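     # Transpose the header rows so that each column header carries the
     # version, timestamp, build and testbed on separate lines.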
2231     hdr_html = [
2232         u"<br>".join(row) for row in zip(*header)
2233     ]
2234     _tpc_generate_html_table(
2235         hdr_html,
2236         tbl_lst,
2237         table[u'output-file'],
2238         sort_data=True,
2239         title=table.get(u"title", u""),
2240         generate_rst=False
2241     )