PAL: Fix sh-run
[csit.git] resources/tools/presentation/generator_tables.py
# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Algorithms to generate tables.
"""


import logging
import csv
import math
import re

from collections import OrderedDict
from xml.etree import ElementTree as ET
from datetime import datetime as dt
from datetime import timedelta
from copy import deepcopy

import plotly.graph_objects as go
import plotly.offline as ploff
import pandas as pd
import prettytable

from numpy import nan, isnan
from yaml import load, FullLoader, YAMLError

from pal_utils import mean, stdev, classify_anomalies, \
    convert_csv_to_pretty_txt, relative_change_stdev, relative_change


REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
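# REGEX_NIC matches the NIC token embedded in CSIT suite names,
# e.g. u"10ge2p1x710" or u"25ge2p1xxv710" (illustrative examples based on
# the test-name patterns handled below).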


def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

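    # Map algorithm names from the specification file to their generator
    # functions.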
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison,
        u"table_job_spec_duration": table_job_spec_duration
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            if table[u"algorithm"] == u"table_weekly_comparison":
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            generator[table[u"algorithm"]](table, data)
        except (NameError, KeyError) as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")


def table_job_spec_duration(table, input_data):
    """Generate the table(s) with algorithm: table_job_spec_duration
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    jb_type = table.get(u"jb-type", None)

    tbl_lst = list()
    if jb_type == u"iterative":
        for line in table.get(u"lines", tuple()):
            tbl_itm = {
                u"name": line.get(u"job-spec", u""),
                u"data": list()
            }
            for job, builds in line.get(u"data-set", dict()).items():
                for build_nr in builds:
                    try:
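                        # u"elapsedtime" is reported in milliseconds; integer
                        # division by 60000 yields whole minutes.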
                        minutes = input_data.metadata(
                            job, str(build_nr)
                        )[u"elapsedtime"] // 60000
                    except (KeyError, IndexError, ValueError, AttributeError):
                        continue
                    tbl_itm[u"data"].append(minutes)
            tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
            tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
            tbl_lst.append(tbl_itm)
    elif jb_type == u"coverage":
        job = table.get(u"data", None)
        if not job:
            return
        for line in table.get(u"lines", tuple()):
            try:
                tbl_itm = {
                    u"name": line.get(u"job-spec", u""),
                    u"mean": input_data.metadata(
                        list(job.keys())[0], str(line[u"build"])
                    )[u"elapsedtime"] // 60000,
                    u"stdev": float(u"nan")
                }
                tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
            except (KeyError, IndexError, ValueError, AttributeError):
                continue
            tbl_lst.append(tbl_itm)
    else:
        logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
        return

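    # Render mean and stdev (both in minutes) as HH:MM strings; a NaN stdev
    # (coverage jobs have a single run) becomes an empty string.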
    for line in tbl_lst:
        line[u"mean"] = \
            f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
        if math.isnan(line[u"stdev"]):
            line[u"stdev"] = u""
        else:
            line[u"stdev"] = (
                f"{int(line[u'stdev'] // 60):02d}:"
                f"{int(line[u'stdev'] % 60):02d}"
            )

    if not tbl_lst:
        return

    rows = list()
    for itm in tbl_lst:
        rows.append([
            itm[u"name"],
            f"{len(itm[u'data'])}",
            f"{itm[u'mean']} +- {itm[u'stdev']}"
            if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
        ])

    txt_table = prettytable.PrettyTable(
        [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
    )
    for row in rows:
        txt_table.add_row(row)
    txt_table.align = u"r"
    txt_table.align[u"Job Specification"] = u"l"

    file_name = f"{table.get(u'output-file', u'')}.txt"
    with open(file_name, u"wt", encoding='utf-8') as txt_file:
        txt_file.write(str(txt_table))


def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: table_oper_data_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"telemetry-show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        if tst_data.get(u"telemetry-show-run", None) is None or \
                isinstance(tst_data[u"telemetry-show-run"], str):
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        for dut_data in tst_data[u"telemetry-show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"runtime", None) is None:
                tcol.text = u"No Data"
                continue

            runtime = dict()
            for item in dut_data[u"runtime"].get(u"data", tuple()):
                tid = int(item[u"labels"][u"thread_id"])
                if runtime.get(tid, None) is None:
                    runtime[tid] = dict()
                gnode = item[u"labels"][u"graph_node"]
                if runtime[tid].get(gnode, None) is None:
                    runtime[tid][gnode] = dict()
                try:
                    runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
                except ValueError:
                    runtime[tid][gnode][item[u"name"]] = item[u"value"]

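            # Aggregate one row per graph node and thread; the average vector
            # size is vectors / calls, or 0.0 when no calls were recorded.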
            threads = dict({idx: list() for idx in range(len(runtime))})
            for idx, run_data in runtime.items():
                for gnode, gdata in run_data.items():
                    threads[idx].append([
                        gnode,
                        int(gdata[u"calls"]),
                        int(gdata[u"vectors"]),
                        int(gdata[u"suspends"]),
                        float(gdata[u"clocks"]),
                        float(gdata[u"vectors"] / gdata[u"calls"]) \
                            if gdata[u"calls"] else 0.0
                    ])

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in threads.items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")


def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"status"] != u"PASS" or \
                    data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u"\n", 1)[1]
                            except IndexError:
                                pass
                        col_data = col_data.replace(u'\n', u' |br| ').\
                            replace(u'\r', u'').replace(u'"', u"'")
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
                        col_data = col_data.replace(u'\n', u' |br| ')
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")


def _tpc_modify_test_name(test_name, ignore_nic=False):
    """Modify a test name by replacing its parts.

    :param test_name: Test name to be modified.
    :param ignore_nic: If True, NIC is removed from TC name.
    :type test_name: str
    :type ignore_nic: bool
    :returns: Modified test name.
    :rtype: str
    """
    test_name_mod = test_name.\
        replace(u"-ndrpdr", u"").\
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c").\
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c").\
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")

    if ignore_nic:
        return re.sub(REGEX_NIC, u"", test_name_mod)
    return test_name_mod
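# Illustrative example (assumed input shape):
#   _tpc_modify_test_name(u"64b-2t1c-ethip4-ip4base-ndrpdr")
#   returns u"64b-1c-ethip4-ip4base".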


def _tpc_modify_displayed_test_name(test_name):
    """Modify a test name which is displayed in a table by replacing its parts.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    return test_name.\
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c").\
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c").\
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")


def _tpc_insert_data(target, src, include_tests):
    """Insert src data to the target structure.

    :param target: Target structure where the data is placed.
    :param src: Source data to be placed into the target structure.
    :param include_tests: Which results will be included (MRR, NDR, PDR).
    :type target: list
    :type src: dict
    :type include_tests: str
    """
    try:
        if include_tests == u"MRR":
            target[u"mean"] = src[u"result"][u"receive-rate"]
            target[u"stdev"] = src[u"result"][u"receive-stdev"]
        elif include_tests == u"PDR":
            target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
        elif include_tests == u"NDR":
            target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
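        # A latency selector in include_tests is a dash-separated 4-part path
        # into the test data; e.g. (hypothetical keys) u"latency-PDR-50-avg"
        # would select src[u"latency"][u"PDR"][u"50"][u"avg"]. A value of -1
        # marks a missing sample; valid values are multiplied by 1e6
        # (presumably seconds to microseconds).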
        elif u"latency" in include_tests:
            keys = include_tests.split(u"-")
            if len(keys) == 4:
                lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
                target[u"data"].append(
                    float(u"nan") if lat == -1 else lat * 1e6
                )
    except (KeyError, TypeError):
        pass


def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

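    # Client-side sorting: pre-render one table trace per (column, direction)
    # pair; the dropdown buttons added below simply switch which single trace
    # is visible.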
    if sort_data:
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )

        if legend:
            try:
                itm_lst = legend[1:-2].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
        if footnote:
            try:
                itm_lst = footnote[1:].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")


def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test Case",
            f"Avg({table[u'reference'][u'title']})",
            f"Stdev({table[u'reference'][u'title']})",
            f"Avg({table[u'compare'][u'title']})",
            f"Stdev({table[u'compare'][u'title']})",
            u"Diff",
            u"Stdev(Diff)"
        ]
        header_str = u";".join(header) + u"\n"
        legend = (
            u"\nLegend:\n"
            f"Avg({table[u'reference'][u'title']}): "
            f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
            f"from a series of runs of the listed tests.\n"
            f"Stdev({table[u'reference'][u'title']}): "
            f"Standard deviation value of {table[u'reference'][u'title']} "
            f"[Mpps] computed from a series of runs of the listed tests.\n"
            f"Avg({table[u'compare'][u'title']}): "
            f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
            f"a series of runs of the listed tests.\n"
            f"Stdev({table[u'compare'][u'title']}): "
            f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
            f"computed from a series of runs of the listed tests.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Stdev(Diff): "
            u"Standard deviation of percentage change calculated for mean "
            u"values."
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = (tst_data[u"result"][u"receive-rate"],
                                  tst_data[u"result"][u"receive-stdev"])
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        if data_r_mean is not None and data_c_mean is not None:
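            # Relative change of the compare mean against the reference mean,
            # with the stdev of that change propagated from both input stdevs.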
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    with open(csv_file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(
        csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
    )
    with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
        file_handler.write(legend)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_lst,
        table[u'output-file'],
        legend=legend,
        title=table.get(u"title", u"")
    )


def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        if len(data_t) < 2:
            continue

        try:
            classification_lst, avgs, _ = classify_anomalies(data_t)
        except ValueError as err:
            logging.info(f"{err} Skipping")
            return

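        # Short-term change compares the last trend value with the value one
        # short window ago; long-term change compares it with the maximum
        # trend value seen in the long window preceding the short window.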
        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[2])

    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")


def _generate_url(testbed, test_name):
    """Generate URL to a trending plot from the name of the test case.

    :param testbed: The testbed used for testing.
    :param test_name: The name of the test case.
    :type testbed: str
    :type test_name: str
    :returns: The URL to the plot with the trending data for the given test
        case.
    :rtype: str
    """

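    # The resulting link has the form
    # "<domain>-<testbed>-<nic>.html#<frame-size>-<cores>-<bsf>-<driver>".
    # Illustrative example (hypothetical test name):
    #   _generate_url(
    #       u"2n-skx", u"10ge2p1x710-64b-2t1c-avf-ethip4-ip4base-ndrpdr"
    #   ) returns u"ip4-2n-skx-x710.html#64b-2t1c-base-avf".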
    if u"x520" in test_name:
        nic = u"x520"
    elif u"x710" in test_name:
        nic = u"x710"
    elif u"xl710" in test_name:
        nic = u"xl710"
    elif u"xxv710" in test_name:
        nic = u"xxv710"
    elif u"vic1227" in test_name:
        nic = u"vic1227"
    elif u"vic1385" in test_name:
        nic = u"vic1385"
    elif u"x553" in test_name:
        nic = u"x553"
    elif u"cx556" in test_name or u"cx556a" in test_name:
        nic = u"cx556a"
    elif u"ena" in test_name:
        nic = u"nitro50g"
    else:
        nic = u""

    if u"64b" in test_name:
        frame_size = u"64b"
    elif u"78b" in test_name:
        frame_size = u"78b"
    elif u"imix" in test_name:
        frame_size = u"imix"
    elif u"9000b" in test_name:
        frame_size = u"9000b"
    elif u"1518b" in test_name:
        frame_size = u"1518b"
    elif u"114b" in test_name:
        frame_size = u"114b"
    else:
        frame_size = u""

    if u"1t1c" in test_name or \
        (u"-1c-" in test_name and
         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
        cores = u"1t1c"
    elif u"2t2c" in test_name or \
         (u"-2c-" in test_name and
          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
        cores = u"2t2c"
    elif u"4t4c" in test_name or \
         (u"-4c-" in test_name and
          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
        cores = u"4t4c"
    elif u"2t1c" in test_name or \
         (u"-1c-" in test_name and
          testbed in
          (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2", u"2n-aws", u"3n-aws")):
        cores = u"2t1c"
    elif u"4t2c" in test_name or \
         (u"-2c-" in test_name and
          testbed in
          (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2", u"2n-aws", u"3n-aws")):
        cores = u"4t2c"
    elif u"8t4c" in test_name or \
         (u"-4c-" in test_name and
          testbed in
          (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2", u"2n-aws", u"3n-aws")):
        cores = u"8t4c"
    else:
        cores = u""

    if u"testpmd" in test_name:
        driver = u"testpmd"
    elif u"l3fwd" in test_name:
        driver = u"l3fwd"
    elif u"avf" in test_name:
        driver = u"avf"
    elif u"af-xdp" in test_name or u"af_xdp" in test_name:
        driver = u"af_xdp"
    elif u"rdma" in test_name:
        driver = u"rdma"
    elif u"dnv" in testbed or u"tsh" in testbed:
        driver = u"ixgbe"
    elif u"ena" in test_name:
        driver = u"ena"
    else:
        driver = u"dpdk"

    if u"macip-iacl1s" in test_name:
        bsf = u"features-macip-iacl1"
    elif u"macip-iacl10s" in test_name:
        bsf = u"features-macip-iacl10"
    elif u"macip-iacl50s" in test_name:
        bsf = u"features-macip-iacl50"
    elif u"iacl1s" in test_name:
        bsf = u"features-iacl1"
    elif u"iacl10s" in test_name:
        bsf = u"features-iacl10"
    elif u"iacl50s" in test_name:
        bsf = u"features-iacl50"
    elif u"oacl1s" in test_name:
        bsf = u"features-oacl1"
    elif u"oacl10s" in test_name:
        bsf = u"features-oacl10"
    elif u"oacl50s" in test_name:
        bsf = u"features-oacl50"
    elif u"nat44det" in test_name:
        bsf = u"nat44det-bidir"
    elif u"nat44ed" in test_name and u"udir" in test_name:
        bsf = u"nat44ed-udir"
    elif u"-cps" in test_name and u"ethip4udp" in test_name:
        bsf = u"udp-cps"
    elif u"-cps" in test_name and u"ethip4tcp" in test_name:
        bsf = u"tcp-cps"
    elif u"-pps" in test_name and u"ethip4udp" in test_name:
        bsf = u"udp-pps"
    elif u"-pps" in test_name and u"ethip4tcp" in test_name:
        bsf = u"tcp-pps"
    elif u"-tput" in test_name and u"ethip4udp" in test_name:
        bsf = u"udp-tput"
    elif u"-tput" in test_name and u"ethip4tcp" in test_name:
        bsf = u"tcp-tput"
    elif u"udpsrcscale" in test_name:
        bsf = u"features-udp"
    elif u"iacl" in test_name:
        bsf = u"features"
    elif u"policer" in test_name:
        bsf = u"features"
    elif u"adl" in test_name:
        bsf = u"features"
    elif u"cop" in test_name:
        bsf = u"features"
    elif u"nat" in test_name:
        bsf = u"features"
    elif u"macip" in test_name:
        bsf = u"features"
    elif u"scale" in test_name:
        bsf = u"scale"
    elif u"base" in test_name:
        bsf = u"base"
    else:
        bsf = u"base"

    if u"114b" in test_name and u"vhost" in test_name:
        domain = u"vts"
    elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
        domain = u"nat44"
        if u"nat44det" in test_name:
            domain += u"-det-bidir"
        else:
            domain += u"-ed"
        if u"udir" in test_name:
            domain += u"-unidir"
        elif u"-ethip4udp-" in test_name:
            domain += u"-udp"
        elif u"-ethip4tcp-" in test_name:
            domain += u"-tcp"
        if u"-cps" in test_name:
            domain += u"-cps"
        elif u"-pps" in test_name:
            domain += u"-pps"
        elif u"-tput" in test_name:
            domain += u"-tput"
    elif u"testpmd" in test_name or u"l3fwd" in test_name:
        domain = u"dpdk"
    elif u"memif" in test_name:
        domain = u"container_memif"
    elif u"srv6" in test_name:
        domain = u"srv6"
    elif u"vhost" in test_name:
        domain = u"vhost"
        if u"vppl2xc" in test_name:
            driver += u"-vpp"
        else:
            driver += u"-testpmd"
        if u"lbvpplacp" in test_name:
            bsf += u"-link-bonding"
    elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
        domain = u"nf_service_density_vnfc"
    elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfc"
    elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfp"
    elif u"ipsec" in test_name:
        domain = u"ipsec"
        if u"sw" in test_name:
            bsf += u"-sw"
        elif u"hw" in test_name:
            bsf += u"-hw"
    elif u"ethip4vxlan" in test_name:
        domain = u"ip4_tunnels"
    elif u"ethip4udpgeneve" in test_name:
        domain = u"ip4_tunnels"
    elif u"ip4base" in test_name or u"ip4scale" in test_name:
        domain = u"ip4"
    elif u"ip6base" in test_name or u"ip6scale" in test_name:
        domain = u"ip6"
    elif u"l2xcbase" in test_name or \
            u"l2xcscale" in test_name or \
            u"l2bdbasemaclrn" in test_name or \
            u"l2bdscale" in test_name or \
            u"l2patch" in test_name:
        domain = u"l2"
    else:
        domain = u""

    file_name = u"-".join((domain, testbed, nic)) + u".html#"
    anchor_name = u"-".join((frame_size, cores, bsf, driver))

    return file_name + anchor_name


def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not supported. "
            f"Skipping."
        )
        return

    if test_type in (u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_suffix = f"-{test_type.lower()}"
    else:
        lnk_dir = u"../trending/"
        lnk_suffix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except FileNotFoundError as err:
        logging.warning(f"{err}")
        return
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
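    # Row shading: any regression in the window shades the row red, any
    # progression green, otherwise the normal blue; the two shades of each
    # color alternate by row index for readability.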
    for r_idx, row in enumerate(csv_lst[1:]):
        if int(row[4]):
            color = u"regression"
        elif int(row[5]):
            color = u"progression"
        else:
            color = u"normal"
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name:
            if c_idx == 0 and table.get(u"add-links", True):
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), item)}"
                        f"{lnk_suffix}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return


def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
                duration = \
                    input_data.metadata(job, build).get(u"elapsedtime", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            tbl_list.append(build)
            tbl_list.append(version)
            failed_tests = list()
            passed = 0
            failed = 0
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                if not groups:
                    continue
                nic = groups.group(0)
                msg = tst_data[u'msg'].replace(u"\n", u"")
1466                 msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
1467                              'xxx.xxx.xxx.xxx', msg)
1468                 msg = msg.split(u'Also teardown failed')[0]
1469                 failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
1470             tbl_list.append(passed)
1471             tbl_list.append(failed)
1472             tbl_list.append(duration)
1473             tbl_list.extend(failed_tests)
1474
1475     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1476     logging.info(f"    Writing file: {file_name}")
1477     with open(file_name, u"wt") as file_handler:
1478         for test in tbl_list:
1479             file_handler.write(f"{test}\n")
1480
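# Illustrative sketch of the message scrubbing done in table_last_failed_tests
# above: newlines are stripped, IPv4 addresses are masked so that otherwise
# identical failures collapse into one message, and a trailing teardown
# report, if any, is cut off. It reuses the module-level re import; the
# sample message is invented.
def _example_scrub_failure_message():
    """Return a masked, teardown-free version of a failure message."""
    msg = (
        u"Timeout waiting for a reply from 10.0.0.1\n"
        u"Also teardown failed: interface is down"
    )
    msg = msg.replace(u"\n", u"")
    msg = re.sub(
        r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})', u'xxx.xxx.xxx.xxx', msg
    )
    # Returns u"Timeout waiting for a reply from xxx.xxx.xxx.xxx"
    return msg.split(u'Also teardown failed')[0]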
1481
1482 def table_failed_tests(table, input_data):
1483     """Generate the table(s) with algorithm: table_failed_tests
1484     specified in the specification file.
1485
1486     :param table: Table to generate.
1487     :param input_data: Data to process.
1488     :type table: pandas.Series
1489     :type input_data: InputData
1490     """
1491
1492     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1493
1494     # Transform the data
1495     logging.info(
1496         f"    Creating the data set for the {table.get(u'type', u'')} "
1497         f"{table.get(u'title', u'')}."
1498     )
1499     data = input_data.filter_data(table, continue_on_error=True)
1500
1501     test_type = u"MRR"
1502     if u"NDRPDR" in table.get(u"filter", list()):
1503         test_type = u"NDRPDR"
1504
1505     # Prepare the header of the tables
1506     header = [
1507         u"Test Case",
1508         u"Failures [#]",
1509         u"Last Failure [Time]",
1510         u"Last Failure [VPP-Build-Id]",
1511         u"Last Failure [CSIT-Job-Build-Id]"
1512     ]
1513
1514     # Generate the data for the table according to the model in the table
1515     # specification
1516
1517     now = dt.utcnow()
1518     timeperiod = timedelta(int(table.get(u"window", 7)))
1519
1520     tbl_dict = dict()
1521     for job, builds in table[u"data"].items():
1522         for build in builds:
1523             build = str(build)
1524             for tst_name, tst_data in data[job][build].items():
1525                 if tst_name.lower() in table.get(u"ignore-list", list()):
1526                     continue
1527                 if tbl_dict.get(tst_name, None) is None:
1528                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1529                     if not groups:
1530                         continue
1531                     nic = groups.group(0)
1532                     tbl_dict[tst_name] = {
1533                         u"name": f"{nic}-{tst_data[u'name']}",
1534                         u"data": OrderedDict()
1535                     }
1536                 try:
1537                     generated = input_data.metadata(job, build).\
1538                         get(u"generated", u"")
1539                     if not generated:
1540                         continue
1541                     then = dt.strptime(generated, u"%Y%m%d %H:%M")
1542                     if (now - then) <= timeperiod:
1543                         tbl_dict[tst_name][u"data"][build] = (
1544                             tst_data[u"status"],
1545                             generated,
1546                             input_data.metadata(job, build).get(u"version",
1547                                                                 u""),
1548                             build
1549                         )
1550                 except (TypeError, KeyError) as err:
1551                     logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
1552
1553     max_fails = 0
1554     tbl_lst = list()
1555     for tst_data in tbl_dict.values():
1556         fails_nr = 0
1557         fails_last_date = u""
1558         fails_last_vpp = u""
1559         fails_last_csit = u""
1560         for val in tst_data[u"data"].values():
1561             if val[0] == u"FAIL":
1562                 fails_nr += 1
1563                 fails_last_date = val[1]
1564                 fails_last_vpp = val[2]
1565                 fails_last_csit = val[3]
1566         if fails_nr:
1567             max_fails = fails_nr if fails_nr > max_fails else max_fails
1568             tbl_lst.append([
1569                 tst_data[u"name"],
1570                 fails_nr,
1571                 fails_last_date,
1572                 fails_last_vpp,
1573                 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1574                 f"-build-{fails_last_csit}"
1575             ])
1576
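     # Sort by the date of the last failure first; the grouping below then
     # orders the rows by failure count while keeping that date order within
     # each group.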
1577     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1578     tbl_sorted = list()
1579     for nrf in range(max_fails, -1, -1):
1580         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1581         tbl_sorted.extend(tbl_fails)
1582
1583     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1584     logging.info(f"    Writing file: {file_name}")
1585     with open(file_name, u"wt") as file_handler:
1586         file_handler.write(u",".join(header) + u"\n")
1587         for test in tbl_sorted:
1588             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1589
1590     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1591     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1592
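# Illustrative sketch of the failure-window test used in table_failed_tests
# above: a build contributes to the table only if its "generated" timestamp
# (format u"%Y%m%d %H:%M", as stored in the build metadata) lies within the
# trending window, 7 days by default. It reuses the module-level dt and
# timedelta imports; window_days mirrors table.get(u"window", 7).
def _example_in_trending_window(generated, window_days=7):
    """Return True if the timestamp falls within the trending window."""
    then = dt.strptime(generated, u"%Y%m%d %H:%M")
    return (dt.utcnow() - then) <= timedelta(int(window_days))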
1593
1594 def table_failed_tests_html(table, input_data):
1595     """Generate the table(s) with algorithm: table_failed_tests_html
1596     specified in the specification file.
1597
1598     :param table: Table to generate.
1599     :param input_data: Data to process.
1600     :type table: pandas.Series
1601     :type input_data: InputData
1602     """
1603
1604     _ = input_data
1605
1606     if not table.get(u"testbed", None):
1607         logging.error(
1608             f"The testbed is not defined for the table "
1609             f"{table.get(u'title', u'')}. Skipping."
1610         )
1611         return
1612
1613     test_type = table.get(u"test-type", u"MRR")
1614     if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1615         logging.error(
1616             f"Test type {table.get(u'test-type', u'MRR')} is not supported. "
1617             f"Skipping."
1618         )
1619         return
1620
1621     if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1622         lnk_dir = u"../ndrpdr_trending/"
1623         lnk_sufix = u"-pdr"
1624     else:
1625         lnk_dir = u"../trending/"
1626         lnk_sufix = u""
1627
1628     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1629
1630     try:
1631         with open(table[u"input-file"], u'rt') as csv_file:
1632             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
     except FileNotFoundError as err:
         logging.warning(f"{err}")
         return
1633     except KeyError:
1634         logging.warning(u"The input file is not defined.")
1635         return
1636     except csv.Error as err:
1637         logging.warning(
1638             f"Unable to process the file {table[u'input-file']}.\n"
1639             f"{repr(err)}"
1640         )
1641         return
1642
1643     # Table:
1644     failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1645
1646     # Table header:
1647     trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1648     for idx, item in enumerate(csv_lst[0]):
1649         alignment = u"left" if idx == 0 else u"center"
1650         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
1651         thead.text = item
1652
1653     # Rows:
1654     colors = (u"#e9f1fb", u"#d4e4f7")
1655     for r_idx, row in enumerate(csv_lst[1:]):
1656         background = colors[r_idx % 2]
1657         trow = ET.SubElement(
1658             failed_tests, u"tr", attrib=dict(bgcolor=background)
1659         )
1660
1661         # Columns:
1662         for c_idx, item in enumerate(row):
1663             tdata = ET.SubElement(
1664                 trow,
1665                 u"td",
1666                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
1667             )
1668             # Name:
1669             if c_idx == 0 and table.get(u"add-links", True):
1670                 ref = ET.SubElement(
1671                     tdata,
1672                     u"a",
1673                     attrib=dict(
1674                         href=f"{lnk_dir}"
1675                         f"{_generate_url(table.get(u'testbed', ''), item)}"
1676                         f"{lnk_sufix}"
1677                     )
1678                 )
1679                 ref.text = item
1680             else:
1681                 tdata.text = item
1682     try:
1683         with open(table[u"output-file"], u'w') as html_file:
1684             logging.info(f"    Writing file: {table[u'output-file']}")
1685             html_file.write(u".. raw:: html\n\n\t")
1686             html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1687             html_file.write(u"\n\t<p><br><br></p>\n")
1688     except KeyError:
1689         logging.warning(u"The output file is not defined.")
1690         return
1691
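# Illustrative sketch of how the generated markup is written out above: the
# HTML table is embedded into an rST file via the ".. raw:: html" directive,
# which makes Sphinx pass it through verbatim. The file_name parameter is
# hypothetical; the generators take it from table[u"output-file"].
def _example_write_raw_html(markup, file_name):
    """Wrap HTML markup in a raw-html directive and write it to a file."""
    with open(file_name, u'w') as html_file:
        html_file.write(u".. raw:: html\n\n\t")
        html_file.write(markup)
        html_file.write(u"\n\t<p><br><br></p>\n")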
1692
1693 def table_comparison(table, input_data):
1694     """Generate the table(s) with algorithm: table_comparison
1695     specified in the specification file.
1696
1697     :param table: Table to generate.
1698     :param input_data: Data to process.
1699     :type table: pandas.Series
1700     :type input_data: InputData
1701     """
1702     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1703
1704     # Transform the data
1705     logging.info(
1706         f"    Creating the data set for the {table.get(u'type', u'')} "
1707         f"{table.get(u'title', u'')}."
1708     )
1709
1710     columns = table.get(u"columns", None)
1711     if not columns:
1712         logging.error(
1713             f"No columns specified for {table.get(u'title', u'')}. Skipping."
1714         )
1715         return
1716
1717     cols = list()
1718     for idx, col in enumerate(columns):
1719         if col.get(u"data-set", None) is None:
1720             logging.warning(f"No data for column {col.get(u'title', u'')}")
1721             continue
1722         tag = col.get(u"tag", None)
1723         data = input_data.filter_data(
1724             table,
1725             params=[
1726                 u"throughput",
1727                 u"result",
1728                 u"latency",
1729                 u"name",
1730                 u"parent",
1731                 u"tags"
1732             ],
1733             data=col[u"data-set"],
1734             continue_on_error=True
1735         )
1736         col_data = {
1737             u"title": col.get(u"title", f"Column{idx}"),
1738             u"data": dict()
1739         }
1740         for builds in data.values:
1741             for build in builds:
1742                 for tst_name, tst_data in build.items():
1743                     if tag and tag not in tst_data[u"tags"]:
1744                         continue
1745                     tst_name_mod = \
1746                         _tpc_modify_test_name(tst_name, ignore_nic=True).\
1747                         replace(u"2n1l-", u"")
1748                     if col_data[u"data"].get(tst_name_mod, None) is None:
1749                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
1750                         if u"across testbeds" in table[u"title"].lower() or \
1751                                 u"across topologies" in table[u"title"].lower():
1752                             name = _tpc_modify_displayed_test_name(name)
1753                         col_data[u"data"][tst_name_mod] = {
1754                             u"name": name,
1755                             u"replace": True,
1756                             u"data": list(),
1757                             u"mean": None,
1758                             u"stdev": None
1759                         }
1760                     _tpc_insert_data(
1761                         target=col_data[u"data"][tst_name_mod],
1762                         src=tst_data,
1763                         include_tests=table[u"include-tests"]
1764                     )
1765
1766         replacement = col.get(u"data-replacement", None)
1767         if replacement:
1768             rpl_data = input_data.filter_data(
1769                 table,
1770                 params=[
1771                     u"throughput",
1772                     u"result",
1773                     u"latency",
1774                     u"name",
1775                     u"parent",
1776                     u"tags"
1777                 ],
1778                 data=replacement,
1779                 continue_on_error=True
1780             )
1781             for builds in rpl_data.values:
1782                 for build in builds:
1783                     for tst_name, tst_data in build.items():
1784                         if tag and tag not in tst_data[u"tags"]:
1785                             continue
1786                         tst_name_mod = \
1787                             _tpc_modify_test_name(tst_name, ignore_nic=True).\
1788                             replace(u"2n1l-", u"")
1789                         if col_data[u"data"].get(tst_name_mod, None) is None:
1790                             name = tst_data[u'name'].rsplit(u'-', 1)[0]
1791                             if u"across testbeds" in table[u"title"].lower() \
1792                                     or u"across topologies" in \
1793                                     table[u"title"].lower():
1794                                 name = _tpc_modify_displayed_test_name(name)
1795                             col_data[u"data"][tst_name_mod] = {
1796                                 u"name": name,
1797                                 u"replace": False,
1798                                 u"data": list(),
1799                                 u"mean": None,
1800                                 u"stdev": None
1801                             }
1802                         if col_data[u"data"][tst_name_mod][u"replace"]:
1803                             col_data[u"data"][tst_name_mod][u"replace"] = False
1804                             col_data[u"data"][tst_name_mod][u"data"] = list()
1805                         _tpc_insert_data(
1806                             target=col_data[u"data"][tst_name_mod],
1807                             src=tst_data,
1808                             include_tests=table[u"include-tests"]
1809                         )
1810
1811         if table[u"include-tests"] in (u"NDR", u"PDR") or \
1812                 u"latency" in table[u"include-tests"]:
1813             for tst_name, tst_data in col_data[u"data"].items():
1814                 if tst_data[u"data"]:
1815                     tst_data[u"mean"] = mean(tst_data[u"data"])
1816                     tst_data[u"stdev"] = stdev(tst_data[u"data"])
1817
1818         cols.append(col_data)
1819
1820     tbl_dict = dict()
1821     for col in cols:
1822         for tst_name, tst_data in col[u"data"].items():
1823             if tbl_dict.get(tst_name, None) is None:
1824                 tbl_dict[tst_name] = {
1825                     "name": tst_data[u"name"]
1826                 }
1827             tbl_dict[tst_name][col[u"title"]] = {
1828                 u"mean": tst_data[u"mean"],
1829                 u"stdev": tst_data[u"stdev"]
1830             }
1831
1832     if not tbl_dict:
1833         logging.warning(f"No data for table {table.get(u'title', u'')}!")
1834         return
1835
1836     tbl_lst = list()
1837     for tst_data in tbl_dict.values():
1838         row = [tst_data[u"name"], ]
1839         for col in cols:
1840             row.append(tst_data.get(col[u"title"], None))
1841         tbl_lst.append(row)
1842
1843     comparisons = table.get(u"comparisons", None)
1844     rcas = list()
1845     if comparisons and isinstance(comparisons, list):
             # Keep only the comparisons with valid references; rcas is built
             # in step with them so that both lists stay aligned.
             valid_comparisons = list()
1846         for idx, comp in enumerate(comparisons):
1847             try:
1848                 col_ref = int(comp[u"reference"])
1849                 col_cmp = int(comp[u"compare"])
1850             except KeyError:
1851                 logging.warning(u"Comparison: No references defined! Skipping.")
1853                 continue
1854             if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols)) \
1855                     or col_ref == col_cmp:
1856                 logging.warning(f"Wrong values of reference={col_ref} "
1857                                 f"and/or compare={col_cmp}. Skipping.")
1859                 continue
             valid_comparisons.append(comp)
1860             rca_file_name = comp.get(u"rca-file", None)
1861             if rca_file_name:
1862                 try:
1863                     with open(rca_file_name, u"r") as file_handler:
1864                         rcas.append(
1865                             {
1866                                 u"title": f"RCA{idx + 1}",
1867                                 u"data": load(file_handler, Loader=FullLoader)
1868                             }
1869                         )
1870                 except (YAMLError, IOError) as err:
1871                     logging.warning(
1872                         f"The RCA file {rca_file_name} does not exist or "
1873                         f"is corrupted!"
1874                     )
1875                     logging.debug(repr(err))
1876                     rcas.append(None)
1877             else:
1878                 rcas.append(None)
         comparisons = valid_comparisons
1879     else:
1880         comparisons = list()
1881
1882     tbl_cmp_lst = list()
1883     if comparisons:
1884         for row in tbl_lst:
1885             new_row = deepcopy(row)
1886             for comp in comparisons:
1887                 ref_itm = row[int(comp[u"reference"])]
1888                 if ref_itm is None and \
1889                         comp.get(u"reference-alt", None) is not None:
1890                     ref_itm = row[int(comp[u"reference-alt"])]
1891                 cmp_itm = row[int(comp[u"compare"])]
1892                 if ref_itm is not None and cmp_itm is not None and \
1893                         ref_itm[u"mean"] is not None and \
1894                         cmp_itm[u"mean"] is not None and \
1895                         ref_itm[u"stdev"] is not None and \
1896                         cmp_itm[u"stdev"] is not None:
1897                     try:
1898                         delta, d_stdev = relative_change_stdev(
1899                             ref_itm[u"mean"], cmp_itm[u"mean"],
1900                             ref_itm[u"stdev"], cmp_itm[u"stdev"]
1901                         )
1902                     except ZeroDivisionError:
1903                         break
1904                     if delta is None or math.isnan(delta):
1905                         break
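                             # Pre-scaled by 1e6: the rendering code further
                             # below divides every cell by 1e6, so these
                             # relative values pass through unchanged.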
1906                     new_row.append({
1907                         u"mean": delta * 1e6,
1908                         u"stdev": d_stdev * 1e6
1909                     })
1910                 else:
1911                     break
1912             else:
1913                 tbl_cmp_lst.append(new_row)
1914
     if not tbl_cmp_lst:
         logging.warning(f"No data for table {table.get(u'title', u'')}!")
         return

1915     try:
1916         tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1917         tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1918     except TypeError as err:
1919         logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
1920
1921     tbl_for_csv = list()
1922     for line in tbl_cmp_lst:
1923         row = [line[0], ]
1924         for idx, itm in enumerate(line[1:]):
1925             if itm is None or not isinstance(itm, dict) or\
1926                     itm.get(u'mean', None) is None or \
1927                     itm.get(u'stdev', None) is None:
1928                 row.append(u"NT")
1929                 row.append(u"NT")
1930             else:
1931                 row.append(round(float(itm[u'mean']) / 1e6, 3))
1932                 row.append(round(float(itm[u'stdev']) / 1e6, 3))
1933         for rca in rcas:
1934             if rca is None:
1935                 continue
1936             rca_nr = rca[u"data"].get(row[0], u"-")
1937             row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1938         tbl_for_csv.append(row)
1939
1940     header_csv = [u"Test Case", ]
1941     for col in cols:
1942         header_csv.append(f"Avg({col[u'title']})")
1943         header_csv.append(f"Stdev({col[u'title']})")
1944     for comp in comparisons:
1945         header_csv.append(
1946             f"Avg({comp.get(u'title', u'')})"
1947         )
1948         header_csv.append(
1949             f"Stdev({comp.get(u'title', u'')})"
1950         )
1951     for rca in rcas:
1952         if rca:
1953             header_csv.append(rca[u"title"])
1954
1955     legend_lst = table.get(u"legend", None)
1956     if legend_lst is None:
1957         legend = u""
1958     else:
1959         legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1960
1961     footnote = u""
1962     if rcas and any(rcas):
1963         footnote += u"\nRoot Cause Analysis:\n"
1964         for rca in rcas:
1965             if rca:
1966                 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
1967
1968     csv_file_name = f"{table[u'output-file']}-csv.csv"
1969     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1970         file_handler.write(
1971             u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1972         )
1973         for test in tbl_for_csv:
1974             file_handler.write(
1975                 u",".join([f'"{item}"' for item in test]) + u"\n"
1976             )
1977         if legend_lst:
1978             for item in legend_lst:
1979                 file_handler.write(f'"{item}"\n')
1980         if footnote:
1981             for itm in footnote.split(u"\n"):
1982                 file_handler.write(f'"{itm}"\n')
1983
1984     tbl_tmp = list()
1985     max_lens = [0, ] * len(tbl_cmp_lst[0])
1986     for line in tbl_cmp_lst:
1987         row = [line[0], ]
1988         for idx, itm in enumerate(line[1:]):
1989             if itm is None or not isinstance(itm, dict) or \
1990                     itm.get(u'mean', None) is None or \
1991                     itm.get(u'stdev', None) is None:
1992                 new_itm = u"NT"
1993             else:
1994                 if idx < len(cols):
1995                     new_itm = (
1996                         f"{round(float(itm[u'mean']) / 1e6, 2)} "
1997                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
1998                         replace(u"nan", u"NaN")
1999                     )
2000                 else:
2001                     new_itm = (
2002                         f"{round(float(itm[u'mean']) / 1e6, 2):+} "
2003                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
2004                         replace(u"nan", u"NaN")
2005                     )
2006             if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
2007                 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
2008             row.append(new_itm)
2009
2010         tbl_tmp.append(row)
2011
2012     header = [u"Test Case", ]
2013     header.extend([col[u"title"] for col in cols])
2014     header.extend([comp.get(u"title", u"") for comp in comparisons])
2015
2016     tbl_final = list()
2017     for line in tbl_tmp:
2018         row = [line[0], ]
2019         for idx, itm in enumerate(line[1:]):
2020             if itm in (u"NT", u"NaN"):
2021                 row.append(itm)
2022                 continue
2023             itm_lst = itm.rsplit(u"\u00B1", 1)
2024             itm_lst[-1] = \
2025                 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
2026             itm_str = u"\u00B1".join(itm_lst)
2027
2028             if idx >= len(cols):
2029                 # Diffs
2030                 rca = rcas[idx - len(cols)]
2031                 if rca:
2032                     # Add rcas to diffs
2033                     rca_nr = rca[u"data"].get(row[0], None)
2034                     if rca_nr:
2035                         hdr_len = len(header[idx + 1]) - 1
2036                         if hdr_len < 19:
2037                             hdr_len = 19
2038                         rca_nr = f"[{rca_nr}]"
2039                         itm_str = (
2040                             f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
2041                             f"{u' ' * (hdr_len - 4 - len(itm_str))}"
2042                             f"{itm_str}"
2043                         )
2044             row.append(itm_str)
2045         tbl_final.append(row)
2046
2047     # Generate csv tables:
2048     csv_file_name = f"{table[u'output-file']}.csv"
2049     logging.info(f"    Writing the file {csv_file_name}")
2050     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2051         file_handler.write(u";".join(header) + u"\n")
2052         for test in tbl_final:
2053             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2054
2055     # Generate txt table:
2056     txt_file_name = f"{table[u'output-file']}.txt"
2057     logging.info(f"    Writing the file {txt_file_name}")
2058     convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
2059
2060     with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
2061         file_handler.write(legend)
2062         file_handler.write(footnote)
2063
2064     # Generate html table:
2065     _tpc_generate_html_table(
2066         header,
2067         tbl_final,
2068         table[u'output-file'],
2069         legend=legend,
2070         footnote=footnote,
2071         sort_data=False,
2072         title=table.get(u"title", u"")
2073     )
2074
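# The comparison rows above rely on Python's for/else: the else branch runs
# only when the for loop finishes without hitting break, so a row enters
# tbl_cmp_lst only if every requested comparison could be computed. A toy
# illustration of the same control flow:
def _example_for_else(rows):
    """Keep only the rows in which no cell is None."""
    kept = list()
    for row in rows:
        for cell in row:
            if cell is None:
                break
        else:
            kept.append(row)
    return kept
# e.g. _example_for_else([[1, 2], [3, None]]) returns [[1, 2]]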
2075
2076 def table_weekly_comparison(table, in_data):
2077     """Generate the table(s) with algorithm: table_weekly_comparison
2078     specified in the specification file.
2079
2080     :param table: Table to generate.
2081     :param in_data: Data to process.
2082     :type table: pandas.Series
2083     :type in_data: InputData
2084     """
2085     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2086
2087     # Transform the data
2088     logging.info(
2089         f"    Creating the data set for the {table.get(u'type', u'')} "
2090         f"{table.get(u'title', u'')}."
2091     )
2092
2093     incl_tests = table.get(u"include-tests", None)
2094     if incl_tests not in (u"NDR", u"PDR"):
2095         logging.error(f"Unsupported value of include-tests ({incl_tests}). Skipping.")
2096         return
2097
2098     nr_cols = table.get(u"nr-of-data-columns", None)
2099     if not nr_cols or nr_cols < 2:
2100         logging.error(
2101             f"Not enough data columns specified for "
                 f"{table.get(u'title', u'')}, at least two are needed. Skipping."
2102         )
2103         return
2104
2105     data = in_data.filter_data(
2106         table,
2107         params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2108         continue_on_error=True
2109     )
2110
2111     header = [
2112         [u"VPP Version", ],
2113         [u"Start Timestamp", ],
2114         [u"CSIT Build", ],
2115         [u"CSIT Testbed", ]
2116     ]
2117     tbl_dict = dict()
2118     idx = 0
2119     tb_tbl = table.get(u"testbeds", None)
2120     for job_name, job_data in data.items():
2121         for build_nr, build in job_data.items():
2122             if idx >= nr_cols:
2123                 break
2124             if build.empty:
2125                 continue
2126
2127             tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2128             if tb_ip and tb_tbl:
2129                 testbed = tb_tbl.get(tb_ip, u"")
2130             else:
2131                 testbed = u""
2132             header[2].insert(1, build_nr)
2133             header[3].insert(1, testbed)
2134             header[1].insert(
2135                 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2136             )
2137             header[0].insert(
2138                 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2139             )
2140
2141             for tst_name, tst_data in build.items():
2142                 tst_name_mod = \
2143                     _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2144                 if not tbl_dict.get(tst_name_mod, None):
2145                     tbl_dict[tst_name_mod] = dict(
2146                         name=tst_data[u'name'].rsplit(u'-', 1)[0],
2147                     )
2148                 try:
2149                     tbl_dict[tst_name_mod][-idx - 1] = \
2150                         tst_data[u"throughput"][incl_tests][u"LOWER"]
2151                 except (TypeError, IndexError, KeyError, ValueError):
2152                     pass
2153             idx += 1
2154
2155     if idx < nr_cols:
2156         logging.error(u"Not enough data to build the table! Skipping.")
2157         return
2158
2159     cmp_dict = dict()
2160     for idx, cmp in enumerate(table.get(u"comparisons", list())):
2161         idx_ref = cmp.get(u"reference", None)
2162         idx_cmp = cmp.get(u"compare", None)
2163         if idx_ref is None or idx_cmp is None:
2164             continue
2165         header[0].append(
2166             f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2167             f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2168         )
2169         header[1].append(u"")
2170         header[2].append(u"")
2171         header[3].append(u"")
2172         for tst_name, tst_data in tbl_dict.items():
2173             if not cmp_dict.get(tst_name, None):
2174                 cmp_dict[tst_name] = list()
2175             ref_data = tst_data.get(idx_ref, None)
2176             cmp_data = tst_data.get(idx_cmp, None)
2177             if ref_data is None or cmp_data is None:
2178                 cmp_dict[tst_name].append(float(u'nan'))
2179             else:
2180                 cmp_dict[tst_name].append(
2181                     relative_change(ref_data, cmp_data)
2182                 )
2183
2184     tbl_lst_none = list()
2185     tbl_lst = list()
2186     for tst_name, tst_data in tbl_dict.items():
2187         itm_lst = [tst_data[u"name"], ]
2188         for idx in range(nr_cols):
2189             item = tst_data.get(-idx - 1, None)
2190             if item is None:
2191                 itm_lst.insert(1, None)
2192             else:
2193                 itm_lst.insert(1, round(item / 1e6, 1))
2194         itm_lst.extend(
2195             [
2196                 None if itm is None else round(itm, 1)
2197                 for itm in cmp_dict[tst_name]
2198             ]
2199         )
2200         if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2201             tbl_lst_none.append(itm_lst)
2202         else:
2203             tbl_lst.append(itm_lst)
2204
2205     tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2206     tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2207     tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2208     tbl_lst.extend(tbl_lst_none)
2209
2210     # Generate csv table:
2211     csv_file_name = f"{table[u'output-file']}.csv"
2212     logging.info(f"    Writing the file {csv_file_name}")
2213     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2214         for hdr in header:
2215             file_handler.write(u",".join(hdr) + u"\n")
2216         for test in tbl_lst:
2217             file_handler.write(u",".join(
2218                 [
2219                     str(item).replace(u"None", u"-").replace(u"nan", u"-").
2220                     replace(u"null", u"-") for item in test
2221                 ]
2222             ) + u"\n")
2223
2224     txt_file_name = f"{table[u'output-file']}.txt"
2225     logging.info(f"    Writing the file {txt_file_name}")
2226     convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2227
2228     # Reorganize header in txt table
2229     txt_table = list()
2230     with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2231         for line in list(file_handler):
2232             txt_table.append(line)
2233     try:
2234         txt_table.insert(5, txt_table.pop(2))
2235         with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2236             file_handler.writelines(txt_table)
2237     except IndexError:
2238         pass
2239
2240     # Generate html table:
2241     hdr_html = [
2242         u"<br>".join(row) for row in zip(*header)
2243     ]
2244     _tpc_generate_html_table(
2245         hdr_html,
2246         tbl_lst,
2247         table[u'output-file'],
2248         sort_data=True,
2249         title=table.get(u"title", u""),
2250         generate_rst=False
2251     )
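# Illustrative sketch of the header transposition above: the weekly table
# keeps four header rows (version, timestamp, build, testbed), and
# zip(*header) regroups them into one multi-line cell per column for the
# HTML output. The sample values are invented.
def _example_transpose_header():
    """Join the per-row header fields into one <br>-separated cell each."""
    header = [
        [u"VPP Version", u"22.02-rc0", u"21.10-release"],
        [u"Start Timestamp", u"20220116 02:15", u"20211017 02:15"],
    ]
    # Returns [u"VPP Version<br>Start Timestamp",
    # u"22.02-rc0<br>20220116 02:15", u"21.10-release<br>20211017 02:15"]
    return [u"<br>".join(row) for row in zip(*header)]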