Trending: Add 2n-icx
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2022 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import math
21 import re
22
23 from collections import OrderedDict
24 from xml.etree import ElementTree as ET
25 from datetime import datetime as dt
26 from datetime import timedelta
27 from copy import deepcopy
28
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
31 import pandas as pd
32 import prettytable
33
34 from numpy import nan, isnan
35 from yaml import load, FullLoader, YAMLError
36
37 from pal_utils import mean, stdev, classify_anomalies, \
38     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
39
40
41 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
42
43
44 def generate_tables(spec, data):
45     """Generate all tables specified in the specification file.
46
47     :param spec: Specification read from the specification file.
48     :param data: Data to process.
49     :type spec: Specification
50     :type data: InputData
51     """
52
53     generator = {
54         u"table_merged_details": table_merged_details,
55         u"table_soak_vs_ndr": table_soak_vs_ndr,
56         u"table_perf_trending_dash": table_perf_trending_dash,
57         u"table_perf_trending_dash_html": table_perf_trending_dash_html,
58         u"table_last_failed_tests": table_last_failed_tests,
59         u"table_failed_tests": table_failed_tests,
60         u"table_failed_tests_html": table_failed_tests_html,
61         u"table_oper_data_html": table_oper_data_html,
62         u"table_comparison": table_comparison,
63         u"table_weekly_comparison": table_weekly_comparison,
64         u"table_job_spec_duration": table_job_spec_duration
65     }
66
67     logging.info(u"Generating the tables ...")
68     for table in spec.tables:
69         try:
70             if table[u"algorithm"] == u"table_weekly_comparison":
71                 table[u"testbeds"] = spec.environment.get(u"testbeds", None)
72             generator[table[u"algorithm"]](table, data)
73         except NameError as err:
74             logging.error(
75                 f"Probably algorithm {table[u'algorithm']} is not defined: "
76                 f"{repr(err)}"
77             )
78     logging.info(u"Done.")
79
80
81 def table_job_spec_duration(table, input_data):
82     """Generate the table(s) with algorithm: table_job_spec_duration
83     specified in the specification file.
84
85     :param table: Table to generate.
86     :param input_data: Data to process.
87     :type table: pandas.Series
88     :type input_data: InputData
89     """
90
91     _ = input_data
92
93     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
94
95     jb_type = table.get(u"jb-type", None)
96
97     tbl_lst = list()
98     if jb_type == u"iterative":
99         for line in table.get(u"lines", tuple()):
100             tbl_itm = {
101                 u"name": line.get(u"job-spec", u""),
102                 u"data": list()
103             }
104             for job, builds in line.get(u"data-set", dict()).items():
105                 for build_nr in builds:
106                     try:
107                         minutes = input_data.metadata(
108                             job, str(build_nr)
109                         )[u"elapsedtime"] // 60000
110                     except (KeyError, IndexError, ValueError, AttributeError):
111                         continue
112                     tbl_itm[u"data"].append(minutes)
113             tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
114             tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
115             tbl_lst.append(tbl_itm)
116     elif jb_type == u"coverage":
117         job = table.get(u"data", None)
118         if not job:
119             return
120         for line in table.get(u"lines", tuple()):
121             try:
122                 tbl_itm = {
123                     u"name": line.get(u"job-spec", u""),
124                     u"mean": input_data.metadata(
125                         list(job.keys())[0], str(line[u"build"])
126                     )[u"elapsedtime"] // 60000,
127                     u"stdev": float(u"nan")
128                 }
129                 tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
130             except (KeyError, IndexError, ValueError, AttributeError):
131                 continue
132             tbl_lst.append(tbl_itm)
133     else:
134         logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
135         return
136
137     for line in tbl_lst:
138         line[u"mean"] = \
139             f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
140         if math.isnan(line[u"stdev"]):
141             line[u"stdev"] = u""
142         else:
143             line[u"stdev"] = \
144                 f"{int(line[u'stdev'] //60):02d}:{int(line[u'stdev'] % 60):02d}"
145
146     if not tbl_lst:
147         return
148
149     rows = list()
150     for itm in tbl_lst:
151         rows.append([
152             itm[u"name"],
153             f"{len(itm[u'data'])}",
154             f"{itm[u'mean']} +- {itm[u'stdev']}"
155             if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
156         ])
157
158     txt_table = prettytable.PrettyTable(
159         [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
160     )
161     for row in rows:
162         txt_table.add_row(row)
163     txt_table.align = u"r"
164     txt_table.align[u"Job Specification"] = u"l"
165
166     file_name = f"{table.get(u'output-file', u'')}.txt"
167     with open(file_name, u"wt", encoding='utf-8') as txt_file:
168         txt_file.write(str(txt_table))
169
170
171 def table_oper_data_html(table, input_data):
172     """Generate the table(s) with algorithm: html_table_oper_data
173     specified in the specification file.
174
175     :param table: Table to generate.
176     :param input_data: Data to process.
177     :type table: pandas.Series
178     :type input_data: InputData
179     """
180
181     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
182     # Transform the data
183     logging.info(
184         f"    Creating the data set for the {table.get(u'type', u'')} "
185         f"{table.get(u'title', u'')}."
186     )
187     data = input_data.filter_data(
188         table,
189         params=[u"name", u"parent", u"telemetry-show-run", u"type"],
190         continue_on_error=True
191     )
192     if data.empty:
193         return
194     data = input_data.merge_data(data)
195
196     sort_tests = table.get(u"sort", None)
197     if sort_tests:
198         args = dict(
199             inplace=True,
200             ascending=(sort_tests == u"ascending")
201         )
202         data.sort_index(**args)
203
204     suites = input_data.filter_data(
205         table,
206         continue_on_error=True,
207         data_set=u"suites"
208     )
209     if suites.empty:
210         return
211     suites = input_data.merge_data(suites)
212
213     def _generate_html_table(tst_data):
214         """Generate an HTML table with operational data for the given test.
215
216         :param tst_data: Test data to be used to generate the table.
217         :type tst_data: pandas.Series
218         :returns: HTML table with operational data.
219         :rtype: str
220         """
221
222         colors = {
223             u"header": u"#7eade7",
224             u"empty": u"#ffffff",
225             u"body": (u"#e9f1fb", u"#d4e4f7")
226         }
227
228         tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
229
230         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
231         thead = ET.SubElement(
232             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
233         )
234         thead.text = tst_data[u"name"]
235
236         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
237         thead = ET.SubElement(
238             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
239         )
240         thead.text = u"\t"
241
242         if tst_data.get(u"telemetry-show-run", None) is None or \
243                 isinstance(tst_data[u"telemetry-show-run"], str):
244             trow = ET.SubElement(
245                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
246             )
247             tcol = ET.SubElement(
248                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
249             )
250             tcol.text = u"No Data"
251
252             trow = ET.SubElement(
253                 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
254             )
255             thead = ET.SubElement(
256                 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
257             )
258             font = ET.SubElement(
259                 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
260             )
261             font.text = u"."
262             return str(ET.tostring(tbl, encoding=u"unicode"))
263
264         tbl_hdr = (
265             u"Name",
266             u"Nr of Vectors",
267             u"Nr of Packets",
268             u"Suspends",
269             u"Cycles per Packet",
270             u"Average Vector Size"
271         )
272
273         for dut_data in tst_data[u"telemetry-show-run"].values():
274             trow = ET.SubElement(
275                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
276             )
277             tcol = ET.SubElement(
278                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
279             )
280             if dut_data.get(u"runtime", None) is None:
281                 tcol.text = u"No Data"
282                 continue
283
284             runtime = dict()
285             for item in dut_data[u"runtime"].get(u"data", tuple()):
286                 tid = int(item[u"labels"][u"thread_id"])
287                 if runtime.get(tid, None) is None:
288                     runtime[tid] = dict()
289                 gnode = item[u"labels"][u"graph_node"]
290                 if runtime[tid].get(gnode, None) is None:
291                     runtime[tid][gnode] = dict()
292                 try:
293                     runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
294                 except ValueError:
295                     runtime[tid][gnode][item[u"name"]] = item[u"value"]
296
297             threads = dict({idx: list() for idx in range(len(runtime))})
298             for idx, run_data in runtime.items():
299                 for gnode, gdata in run_data.items():
300                     threads[idx].append([
301                         gnode,
302                         int(gdata[u"calls"]),
303                         int(gdata[u"vectors"]),
304                         int(gdata[u"suspends"]),
305                         float(gdata[u"clocks"]),
306                         float(gdata[u"vectors"] / gdata[u"calls"]) \
307                             if gdata[u"calls"] else 0.0
308                     ])
309
310             bold = ET.SubElement(tcol, u"b")
311             bold.text = (
312                 f"Host IP: {dut_data.get(u'host', '')}, "
313                 f"Socket: {dut_data.get(u'socket', '')}"
314             )
315             trow = ET.SubElement(
316                 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
317             )
318             thead = ET.SubElement(
319                 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
320             )
321             thead.text = u"\t"
322
323             for thread_nr, thread in threads.items():
324                 trow = ET.SubElement(
325                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
326                 )
327                 tcol = ET.SubElement(
328                     trow, u"td", attrib=dict(align=u"left", colspan=u"6")
329                 )
330                 bold = ET.SubElement(tcol, u"b")
331                 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
332                 trow = ET.SubElement(
333                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
334                 )
335                 for idx, col in enumerate(tbl_hdr):
336                     tcol = ET.SubElement(
337                         trow, u"td",
338                         attrib=dict(align=u"right" if idx else u"left")
339                     )
340                     font = ET.SubElement(
341                         tcol, u"font", attrib=dict(size=u"2")
342                     )
343                     bold = ET.SubElement(font, u"b")
344                     bold.text = col
345                 for row_nr, row in enumerate(thread):
346                     trow = ET.SubElement(
347                         tbl, u"tr",
348                         attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
349                     )
350                     for idx, col in enumerate(row):
351                         tcol = ET.SubElement(
352                             trow, u"td",
353                             attrib=dict(align=u"right" if idx else u"left")
354                         )
355                         font = ET.SubElement(
356                             tcol, u"font", attrib=dict(size=u"2")
357                         )
358                         if isinstance(col, float):
359                             font.text = f"{col:.2f}"
360                         else:
361                             font.text = str(col)
362                 trow = ET.SubElement(
363                     tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
364                 )
365                 thead = ET.SubElement(
366                     trow, u"th", attrib=dict(align=u"left", colspan=u"6")
367                 )
368                 thead.text = u"\t"
369
370         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
371         thead = ET.SubElement(
372             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
373         )
374         font = ET.SubElement(
375             thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
376         )
377         font.text = u"."
378
379         return str(ET.tostring(tbl, encoding=u"unicode"))
380
381     for suite in suites.values:
382         html_table = str()
383         for test_data in data.values:
384             if test_data[u"parent"] not in suite[u"name"]:
385                 continue
386             html_table += _generate_html_table(test_data)
387         if not html_table:
388             continue
389         try:
390             file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
391             with open(f"{file_name}", u'w') as html_file:
392                 logging.info(f"    Writing file: {file_name}")
393                 html_file.write(u".. raw:: html\n\n\t")
394                 html_file.write(html_table)
395                 html_file.write(u"\n\t<p><br><br></p>\n")
396         except KeyError:
397             logging.warning(u"The output file is not defined.")
398             return
399     logging.info(u"  Done.")
400
401
402 def table_merged_details(table, input_data):
403     """Generate the table(s) with algorithm: table_merged_details
404     specified in the specification file.
405
406     :param table: Table to generate.
407     :param input_data: Data to process.
408     :type table: pandas.Series
409     :type input_data: InputData
410     """
411
412     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
413
414     # Transform the data
415     logging.info(
416         f"    Creating the data set for the {table.get(u'type', u'')} "
417         f"{table.get(u'title', u'')}."
418     )
419     data = input_data.filter_data(table, continue_on_error=True)
420     data = input_data.merge_data(data)
421
422     sort_tests = table.get(u"sort", None)
423     if sort_tests:
424         args = dict(
425             inplace=True,
426             ascending=(sort_tests == u"ascending")
427         )
428         data.sort_index(**args)
429
430     suites = input_data.filter_data(
431         table, continue_on_error=True, data_set=u"suites")
432     suites = input_data.merge_data(suites)
433
434     # Prepare the header of the tables
435     header = list()
436     for column in table[u"columns"]:
437         header.append(
438             u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
439         )
440
441     for suite in suites.values:
442         # Generate data
443         suite_name = suite[u"name"]
444         table_lst = list()
445         for test in data.keys():
446             if data[test][u"status"] != u"PASS" or \
447                     data[test][u"parent"] not in suite_name:
448                 continue
449             row_lst = list()
450             for column in table[u"columns"]:
451                 try:
452                     col_data = str(data[test][column[
453                         u"data"].split(u" ")[1]]).replace(u'"', u'""')
454                     # Do not include tests with "Test Failed" in test message
455                     if u"Test Failed" in col_data:
456                         continue
457                     col_data = col_data.replace(
458                         u"No Data", u"Not Captured     "
459                     )
460                     if column[u"data"].split(u" ")[1] in (u"name", ):
461                         if len(col_data) > 30:
462                             col_data_lst = col_data.split(u"-")
463                             half = int(len(col_data_lst) / 2)
464                             col_data = f"{u'-'.join(col_data_lst[:half])}" \
465                                        f"- |br| " \
466                                        f"{u'-'.join(col_data_lst[half:])}"
467                         col_data = f" |prein| {col_data} |preout| "
468                     elif column[u"data"].split(u" ")[1] in (u"msg", ):
469                         # Temporary solution: remove NDR results from message:
470                         if bool(table.get(u'remove-ndr', False)):
471                             try:
472                                 col_data = col_data.split(u"\n", 1)[1]
473                             except IndexError:
474                                 pass
475                         col_data = col_data.replace(u'\n', u' |br| ').\
476                             replace(u'\r', u'').replace(u'"', u"'")
477                         col_data = f" |prein| {col_data} |preout| "
478                     elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
479                         col_data = col_data.replace(u'\n', u' |br| ')
480                         col_data = f" |prein| {col_data[:-5]} |preout| "
481                     row_lst.append(f'"{col_data}"')
482                 except KeyError:
483                     row_lst.append(u'"Not captured"')
484             if len(row_lst) == len(table[u"columns"]):
485                 table_lst.append(row_lst)
486
487         # Write the data to file
488         if table_lst:
489             separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
490             file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
491             logging.info(f"      Writing file: {file_name}")
492             with open(file_name, u"wt") as file_handler:
493                 file_handler.write(u",".join(header) + u"\n")
494                 for item in table_lst:
495                     file_handler.write(u",".join(item) + u"\n")
496
497     logging.info(u"  Done.")
498
499
500 def _tpc_modify_test_name(test_name, ignore_nic=False):
501     """Modify a test name by replacing its parts.
502
503     :param test_name: Test name to be modified.
504     :param ignore_nic: If True, NIC is removed from TC name.
505     :type test_name: str
506     :type ignore_nic: bool
507     :returns: Modified test name.
508     :rtype: str
509     """
510     test_name_mod = test_name.\
511         replace(u"-ndrpdr", u"").\
512         replace(u"1t1c", u"1c").\
513         replace(u"2t1c", u"1c"). \
514         replace(u"2t2c", u"2c").\
515         replace(u"4t2c", u"2c"). \
516         replace(u"4t4c", u"4c").\
517         replace(u"8t4c", u"4c")
518
519     if ignore_nic:
520         return re.sub(REGEX_NIC, u"", test_name_mod)
521     return test_name_mod
522
523
524 def _tpc_modify_displayed_test_name(test_name):
525     """Modify a test name which is displayed in a table by replacing its parts.
526
527     :param test_name: Test name to be modified.
528     :type test_name: str
529     :returns: Modified test name.
530     :rtype: str
531     """
532     return test_name.\
533         replace(u"1t1c", u"1c").\
534         replace(u"2t1c", u"1c"). \
535         replace(u"2t2c", u"2c").\
536         replace(u"4t2c", u"2c"). \
537         replace(u"4t4c", u"4c").\
538         replace(u"8t4c", u"4c")
539
540
541 def _tpc_insert_data(target, src, include_tests):
542     """Insert src data to the target structure.
543
544     :param target: Target structure where the data is placed.
545     :param src: Source data to be placed into the target structure.
546     :param include_tests: Which results will be included (MRR, NDR, PDR).
547     :type target: list
548     :type src: dict
549     :type include_tests: str
550     """
551     try:
552         if include_tests == u"MRR":
553             target[u"mean"] = src[u"result"][u"receive-rate"]
554             target[u"stdev"] = src[u"result"][u"receive-stdev"]
555         elif include_tests == u"PDR":
556             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
557         elif include_tests == u"NDR":
558             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
559         elif u"latency" in include_tests:
560             keys = include_tests.split(u"-")
561             if len(keys) == 4:
562                 lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
563                 target[u"data"].append(
564                     float(u"nan") if lat == -1 else lat * 1e6
565                 )
566     except (KeyError, TypeError):
567         pass
568
569
570 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
571                              footnote=u"", sort_data=True, title=u"",
572                              generate_rst=True):
573     """Generate html table from input data with simple sorting possibility.
574
575     :param header: Table header.
576     :param data: Input data to be included in the table. It is a list of lists.
577         Inner lists are rows in the table. All inner lists must be of the same
578         length. The length of these lists must be the same as the length of the
579         header.
580     :param out_file_name: The name (relative or full path) where the
581         generated html table is written.
582     :param legend: The legend to display below the table.
583     :param footnote: The footnote to display below the table (and legend).
584     :param sort_data: If True the data sorting is enabled.
585     :param title: The table (and file) title.
586     :param generate_rst: If True, wrapping rst file is generated.
587     :type header: list
588     :type data: list of lists
589     :type out_file_name: str
590     :type legend: str
591     :type footnote: str
592     :type sort_data: bool
593     :type title: str
594     :type generate_rst: bool
595     """
596
597     try:
598         idx = header.index(u"Test Case")
599     except ValueError:
600         idx = 0
601     params = {
602         u"align-hdr": (
603             [u"left", u"right"],
604             [u"left", u"left", u"right"],
605             [u"left", u"left", u"left", u"right"]
606         ),
607         u"align-itm": (
608             [u"left", u"right"],
609             [u"left", u"left", u"right"],
610             [u"left", u"left", u"left", u"right"]
611         ),
612         u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
613     }
614
615     df_data = pd.DataFrame(data, columns=header)
616
617     if sort_data:
618         df_sorted = [df_data.sort_values(
619             by=[key, header[idx]], ascending=[True, True]
620             if key != header[idx] else [False, True]) for key in header]
621         df_sorted_rev = [df_data.sort_values(
622             by=[key, header[idx]], ascending=[False, True]
623             if key != header[idx] else [True, True]) for key in header]
624         df_sorted.extend(df_sorted_rev)
625     else:
626         df_sorted = df_data
627
628     fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
629                    for idx in range(len(df_data))]]
630     table_header = dict(
631         values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
632         fill_color=u"#7eade7",
633         align=params[u"align-hdr"][idx],
634         font=dict(
635             family=u"Courier New",
636             size=12
637         )
638     )
639
640     fig = go.Figure()
641
642     if sort_data:
643         for table in df_sorted:
644             columns = [table.get(col) for col in header]
645             fig.add_trace(
646                 go.Table(
647                     columnwidth=params[u"width"][idx],
648                     header=table_header,
649                     cells=dict(
650                         values=columns,
651                         fill_color=fill_color,
652                         align=params[u"align-itm"][idx],
653                         font=dict(
654                             family=u"Courier New",
655                             size=12
656                         )
657                     )
658                 )
659             )
660
661         buttons = list()
662         menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
663         menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
664         for idx, hdr in enumerate(menu_items):
665             visible = [False, ] * len(menu_items)
666             visible[idx] = True
667             buttons.append(
668                 dict(
669                     label=hdr.replace(u" [Mpps]", u""),
670                     method=u"update",
671                     args=[{u"visible": visible}],
672                 )
673             )
674
675         fig.update_layout(
676             updatemenus=[
677                 go.layout.Updatemenu(
678                     type=u"dropdown",
679                     direction=u"down",
680                     x=0.0,
681                     xanchor=u"left",
682                     y=1.002,
683                     yanchor=u"bottom",
684                     active=len(menu_items) - 1,
685                     buttons=list(buttons)
686                 )
687             ],
688         )
689     else:
690         fig.add_trace(
691             go.Table(
692                 columnwidth=params[u"width"][idx],
693                 header=table_header,
694                 cells=dict(
695                     values=[df_sorted.get(col) for col in header],
696                     fill_color=fill_color,
697                     align=params[u"align-itm"][idx],
698                     font=dict(
699                         family=u"Courier New",
700                         size=12
701                     )
702                 )
703             )
704         )
705
706     ploff.plot(
707         fig,
708         show_link=False,
709         auto_open=False,
710         filename=f"{out_file_name}_in.html"
711     )
712
713     if not generate_rst:
714         return
715
716     file_name = out_file_name.split(u"/")[-1]
717     if u"vpp" in out_file_name:
718         path = u"_tmp/src/vpp_performance_tests/comparisons/"
719     else:
720         path = u"_tmp/src/dpdk_performance_tests/comparisons/"
721     logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
722     with open(f"{path}{file_name}.rst", u"wt") as rst_file:
723         rst_file.write(
724             u"\n"
725             u".. |br| raw:: html\n\n    <br />\n\n\n"
726             u".. |prein| raw:: html\n\n    <pre>\n\n\n"
727             u".. |preout| raw:: html\n\n    </pre>\n\n"
728         )
729         if title:
730             rst_file.write(f"{title}\n")
731             rst_file.write(f"{u'`' * len(title)}\n\n")
732         rst_file.write(
733             u".. raw:: html\n\n"
734             f'    <iframe frameborder="0" scrolling="no" '
735             f'width="1600" height="1200" '
736             f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
737             f'</iframe>\n\n'
738         )
739
740         if legend:
741             try:
742                 itm_lst = legend[1:-2].split(u"\n")
743                 rst_file.write(
744                     f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
745                 )
746             except IndexError as err:
747                 logging.error(f"Legend cannot be written to html file\n{err}")
748         if footnote:
749             try:
750                 itm_lst = footnote[1:].split(u"\n")
751                 rst_file.write(
752                     f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
753                 )
754             except IndexError as err:
755                 logging.error(f"Footnote cannot be written to html file\n{err}")
756
757
758 def table_soak_vs_ndr(table, input_data):
759     """Generate the table(s) with algorithm: table_soak_vs_ndr
760     specified in the specification file.
761
762     :param table: Table to generate.
763     :param input_data: Data to process.
764     :type table: pandas.Series
765     :type input_data: InputData
766     """
767
768     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
769
770     # Transform the data
771     logging.info(
772         f"    Creating the data set for the {table.get(u'type', u'')} "
773         f"{table.get(u'title', u'')}."
774     )
775     data = input_data.filter_data(table, continue_on_error=True)
776
777     # Prepare the header of the table
778     try:
779         header = [
780             u"Test Case",
781             f"Avg({table[u'reference'][u'title']})",
782             f"Stdev({table[u'reference'][u'title']})",
783             f"Avg({table[u'compare'][u'title']})",
784             f"Stdev{table[u'compare'][u'title']})",
785             u"Diff",
786             u"Stdev(Diff)"
787         ]
788         header_str = u";".join(header) + u"\n"
789         legend = (
790             u"\nLegend:\n"
791             f"Avg({table[u'reference'][u'title']}): "
792             f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
793             f"from a series of runs of the listed tests.\n"
794             f"Stdev({table[u'reference'][u'title']}): "
795             f"Standard deviation value of {table[u'reference'][u'title']} "
796             f"[Mpps] computed from a series of runs of the listed tests.\n"
797             f"Avg({table[u'compare'][u'title']}): "
798             f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
799             f"a series of runs of the listed tests.\n"
800             f"Stdev({table[u'compare'][u'title']}): "
801             f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
802             f"computed from a series of runs of the listed tests.\n"
803             f"Diff({table[u'reference'][u'title']},"
804             f"{table[u'compare'][u'title']}): "
805             f"Percentage change calculated for mean values.\n"
806             u"Stdev(Diff): "
807             u"Standard deviation of percentage change calculated for mean "
808             u"values."
809         )
810     except (AttributeError, KeyError) as err:
811         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
812         return
813
814     # Create a list of available SOAK test results:
815     tbl_dict = dict()
816     for job, builds in table[u"compare"][u"data"].items():
817         for build in builds:
818             for tst_name, tst_data in data[job][str(build)].items():
819                 if tst_data[u"type"] == u"SOAK":
820                     tst_name_mod = tst_name.replace(u"-soak", u"")
821                     if tbl_dict.get(tst_name_mod, None) is None:
822                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
823                         nic = groups.group(0) if groups else u""
824                         name = (
825                             f"{nic}-"
826                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
827                         )
828                         tbl_dict[tst_name_mod] = {
829                             u"name": name,
830                             u"ref-data": list(),
831                             u"cmp-data": list()
832                         }
833                     try:
834                         tbl_dict[tst_name_mod][u"cmp-data"].append(
835                             tst_data[u"throughput"][u"LOWER"])
836                     except (KeyError, TypeError):
837                         pass
838     tests_lst = tbl_dict.keys()
839
840     # Add corresponding NDR test results:
841     for job, builds in table[u"reference"][u"data"].items():
842         for build in builds:
843             for tst_name, tst_data in data[job][str(build)].items():
844                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
845                     replace(u"-mrr", u"")
846                 if tst_name_mod not in tests_lst:
847                     continue
848                 try:
849                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
850                         continue
851                     if table[u"include-tests"] == u"MRR":
852                         result = (tst_data[u"result"][u"receive-rate"],
853                                   tst_data[u"result"][u"receive-stdev"])
854                     elif table[u"include-tests"] == u"PDR":
855                         result = \
856                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
857                     elif table[u"include-tests"] == u"NDR":
858                         result = \
859                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
860                     else:
861                         result = None
862                     if result is not None:
863                         tbl_dict[tst_name_mod][u"ref-data"].append(
864                             result)
865                 except (KeyError, TypeError):
866                     continue
867
868     tbl_lst = list()
869     for tst_name in tbl_dict:
870         item = [tbl_dict[tst_name][u"name"], ]
871         data_r = tbl_dict[tst_name][u"ref-data"]
872         if data_r:
873             if table[u"include-tests"] == u"MRR":
874                 data_r_mean = data_r[0][0]
875                 data_r_stdev = data_r[0][1]
876             else:
877                 data_r_mean = mean(data_r)
878                 data_r_stdev = stdev(data_r)
879             item.append(round(data_r_mean / 1e6, 1))
880             item.append(round(data_r_stdev / 1e6, 1))
881         else:
882             data_r_mean = None
883             data_r_stdev = None
884             item.extend([None, None])
885         data_c = tbl_dict[tst_name][u"cmp-data"]
886         if data_c:
887             if table[u"include-tests"] == u"MRR":
888                 data_c_mean = data_c[0][0]
889                 data_c_stdev = data_c[0][1]
890             else:
891                 data_c_mean = mean(data_c)
892                 data_c_stdev = stdev(data_c)
893             item.append(round(data_c_mean / 1e6, 1))
894             item.append(round(data_c_stdev / 1e6, 1))
895         else:
896             data_c_mean = None
897             data_c_stdev = None
898             item.extend([None, None])
899         if data_r_mean is not None and data_c_mean is not None:
900             delta, d_stdev = relative_change_stdev(
901                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
902             try:
903                 item.append(round(delta))
904             except ValueError:
905                 item.append(delta)
906             try:
907                 item.append(round(d_stdev))
908             except ValueError:
909                 item.append(d_stdev)
910             tbl_lst.append(item)
911
912     # Sort the table according to the relative change
913     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
914
915     # Generate csv tables:
916     csv_file_name = f"{table[u'output-file']}.csv"
917     with open(csv_file_name, u"wt") as file_handler:
918         file_handler.write(header_str)
919         for test in tbl_lst:
920             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
921
922     convert_csv_to_pretty_txt(
923         csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
924     )
925     with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
926         file_handler.write(legend)
927
928     # Generate html table:
929     _tpc_generate_html_table(
930         header,
931         tbl_lst,
932         table[u'output-file'],
933         legend=legend,
934         title=table.get(u"title", u"")
935     )
936
937
938 def table_perf_trending_dash(table, input_data):
939     """Generate the table(s) with algorithm:
940     table_perf_trending_dash
941     specified in the specification file.
942
943     :param table: Table to generate.
944     :param input_data: Data to process.
945     :type table: pandas.Series
946     :type input_data: InputData
947     """
948
949     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
950
951     # Transform the data
952     logging.info(
953         f"    Creating the data set for the {table.get(u'type', u'')} "
954         f"{table.get(u'title', u'')}."
955     )
956     data = input_data.filter_data(table, continue_on_error=True)
957
958     # Prepare the header of the tables
959     header = [
960         u"Test Case",
961         u"Trend [Mpps]",
962         u"Runs [#]",
963         u"Long-Term Change [%]",
964         u"Regressions [#]",
965         u"Progressions [#]"
966     ]
967     header_str = u",".join(header) + u"\n"
968
969     incl_tests = table.get(u"include-tests", u"MRR")
970
971     # Prepare data to the table:
972     tbl_dict = dict()
973     for job, builds in table[u"data"].items():
974         for build in builds:
975             for tst_name, tst_data in data[job][str(build)].items():
976                 if tst_name.lower() in table.get(u"ignore-list", list()):
977                     continue
978                 if tbl_dict.get(tst_name, None) is None:
979                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
980                     if not groups:
981                         continue
982                     nic = groups.group(0)
983                     tbl_dict[tst_name] = {
984                         u"name": f"{nic}-{tst_data[u'name']}",
985                         u"data": OrderedDict()
986                     }
987                 try:
988                     if incl_tests == u"MRR":
989                         tbl_dict[tst_name][u"data"][str(build)] = \
990                             tst_data[u"result"][u"receive-rate"]
991                     elif incl_tests == u"NDR":
992                         tbl_dict[tst_name][u"data"][str(build)] = \
993                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
994                     elif incl_tests == u"PDR":
995                         tbl_dict[tst_name][u"data"][str(build)] = \
996                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
997                 except (TypeError, KeyError):
998                     pass  # No data in output.xml for this test
999
1000     tbl_lst = list()
1001     for tst_name in tbl_dict:
1002         data_t = tbl_dict[tst_name][u"data"]
1003         if len(data_t) < 2:
1004             continue
1005
1006         try:
1007             classification_lst, avgs, _ = classify_anomalies(data_t)
1008         except ValueError as err:
1009             logging.info(f"{err} Skipping")
1010             return
1011
1012         win_size = min(len(data_t), table[u"window"])
1013         long_win_size = min(len(data_t), table[u"long-trend-window"])
1014
1015         try:
1016             max_long_avg = max(
1017                 [x for x in avgs[-long_win_size:-win_size]
1018                  if not isnan(x)])
1019         except ValueError:
1020             max_long_avg = nan
1021         last_avg = avgs[-1]
1022         avg_week_ago = avgs[max(-win_size, -len(avgs))]
1023
1024         nr_of_last_avgs = 0;
1025         for x in reversed(avgs):
1026             if x == last_avg:
1027                 nr_of_last_avgs += 1
1028             else:
1029                 break
1030
1031         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1032             rel_change_last = nan
1033         else:
1034             rel_change_last = round(
1035                 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
1036
1037         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1038             rel_change_long = nan
1039         else:
1040             rel_change_long = round(
1041                 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
1042
1043         if classification_lst:
1044             if isnan(rel_change_last) and isnan(rel_change_long):
1045                 continue
1046             if isnan(last_avg) or isnan(rel_change_last) or \
1047                     isnan(rel_change_long):
1048                 continue
1049             tbl_lst.append(
1050                 [tbl_dict[tst_name][u"name"],
1051                  round(last_avg / 1e6, 2),
1052                  nr_of_last_avgs,
1053                  rel_change_long,
1054                  classification_lst[-win_size+1:].count(u"regression"),
1055                  classification_lst[-win_size+1:].count(u"progression")])
1056
1057     tbl_lst.sort(key=lambda rel: rel[0])
1058     tbl_lst.sort(key=lambda rel: rel[2])
1059     tbl_lst.sort(key=lambda rel: rel[3])
1060     tbl_lst.sort(key=lambda rel: rel[5], reverse=True)
1061     tbl_lst.sort(key=lambda rel: rel[4], reverse=True)
1062
1063     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1064
1065     logging.info(f"    Writing file: {file_name}")
1066     with open(file_name, u"wt") as file_handler:
1067         file_handler.write(header_str)
1068         for test in tbl_lst:
1069             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1070
1071     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1072     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1073
1074
1075 def _generate_url(testbed, test_name):
1076     """Generate URL to a trending plot from the name of the test case.
1077
1078     :param testbed: The testbed used for testing.
1079     :param test_name: The name of the test case.
1080     :type testbed: str
1081     :type test_name: str
1082     :returns: The URL to the plot with the trending data for the given test
1083         case.
1084     :rtype str
1085     """
1086
1087     if u"x520" in test_name:
1088         nic = u"x520"
1089     elif u"x710" in test_name:
1090         nic = u"x710"
1091     elif u"xl710" in test_name:
1092         nic = u"xl710"
1093     elif u"xxv710" in test_name:
1094         nic = u"xxv710"
1095     elif u"vic1227" in test_name:
1096         nic = u"vic1227"
1097     elif u"vic1385" in test_name:
1098         nic = u"vic1385"
1099     elif u"x553" in test_name:
1100         nic = u"x553"
1101     elif u"cx556" in test_name or u"cx556a" in test_name:
1102         nic = u"cx556a"
1103     elif u"ena" in test_name:
1104         nic = u"nitro50g"
1105     else:
1106         nic = u""
1107
1108     if u"64b" in test_name:
1109         frame_size = u"64b"
1110     elif u"78b" in test_name:
1111         frame_size = u"78b"
1112     elif u"imix" in test_name:
1113         frame_size = u"imix"
1114     elif u"9000b" in test_name:
1115         frame_size = u"9000b"
1116     elif u"1518b" in test_name:
1117         frame_size = u"1518b"
1118     elif u"114b" in test_name:
1119         frame_size = u"114b"
1120     else:
1121         frame_size = u""
1122
1123     if u"1t1c" in test_name or \
1124         (u"-1c-" in test_name and
1125          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1126         cores = u"1t1c"
1127     elif u"2t2c" in test_name or \
1128          (u"-2c-" in test_name and
1129           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1130         cores = u"2t2c"
1131     elif u"4t4c" in test_name or \
1132          (u"-4c-" in test_name and
1133           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1134         cores = u"4t4c"
1135     elif u"2t1c" in test_name or \
1136          (u"-1c-" in test_name and
1137           testbed in
1138           (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1139            u"2n-aws", u"3n-aws")):
1140         cores = u"2t1c"
1141     elif u"4t2c" in test_name or \
1142          (u"-2c-" in test_name and
1143           testbed in
1144           (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1145            u"2n-aws", u"3n-aws")):
1146         cores = u"4t2c"
1147     elif u"8t4c" in test_name or \
1148          (u"-4c-" in test_name and
1149           testbed in
1150           (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1151            u"2n-aws", u"3n-aws")):
1152         cores = u"8t4c"
1153     else:
1154         cores = u""
1155
1156     if u"testpmd" in test_name:
1157         driver = u"testpmd"
1158     elif u"l3fwd" in test_name:
1159         driver = u"l3fwd"
1160     elif u"avf" in test_name:
1161         driver = u"avf"
1162     elif u"af-xdp" in test_name or u"af_xdp" in test_name:
1163         driver = u"af_xdp"
1164     elif u"rdma" in test_name:
1165         driver = u"rdma"
1166     elif u"dnv" in testbed or u"tsh" in testbed:
1167         driver = u"ixgbe"
1168     elif u"ena" in test_name:
1169         driver = u"ena"
1170     else:
1171         driver = u"dpdk"
1172
1173     if u"macip-iacl1s" in test_name:
1174         bsf = u"features-macip-iacl1"
1175     elif u"macip-iacl10s" in test_name:
1176         bsf = u"features-macip-iacl10"
1177     elif u"macip-iacl50s" in test_name:
1178         bsf = u"features-macip-iacl50"
1179     elif u"iacl1s" in test_name:
1180         bsf = u"features-iacl1"
1181     elif u"iacl10s" in test_name:
1182         bsf = u"features-iacl10"
1183     elif u"iacl50s" in test_name:
1184         bsf = u"features-iacl50"
1185     elif u"oacl1s" in test_name:
1186         bsf = u"features-oacl1"
1187     elif u"oacl10s" in test_name:
1188         bsf = u"features-oacl10"
1189     elif u"oacl50s" in test_name:
1190         bsf = u"features-oacl50"
1191     elif u"nat44det" in test_name:
1192         bsf = u"nat44det-bidir"
1193     elif u"nat44ed" in test_name and u"udir" in test_name:
1194         bsf = u"nat44ed-udir"
1195     elif u"-cps" in test_name and u"ethip4udp" in test_name:
1196         bsf = u"udp-cps"
1197     elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1198         bsf = u"tcp-cps"
1199     elif u"-pps" in test_name and u"ethip4udp" in test_name:
1200         bsf = u"udp-pps"
1201     elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1202         bsf = u"tcp-pps"
1203     elif u"-tput" in test_name and u"ethip4udp" in test_name:
1204         bsf = u"udp-tput"
1205     elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1206         bsf = u"tcp-tput"
1207     elif u"udpsrcscale" in test_name:
1208         bsf = u"features-udp"
1209     elif u"iacl" in test_name:
1210         bsf = u"features"
1211     elif u"policer" in test_name:
1212         bsf = u"features"
1213     elif u"adl" in test_name:
1214         bsf = u"features"
1215     elif u"cop" in test_name:
1216         bsf = u"features"
1217     elif u"nat" in test_name:
1218         bsf = u"features"
1219     elif u"macip" in test_name:
1220         bsf = u"features"
1221     elif u"scale" in test_name:
1222         bsf = u"scale"
1223     elif u"base" in test_name:
1224         bsf = u"base"
1225     else:
1226         bsf = u"base"
1227
1228     if u"114b" in test_name and u"vhost" in test_name:
1229         domain = u"vts"
1230     elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1231         domain = u"nat44"
1232         if u"nat44det" in test_name:
1233             domain += u"-det-bidir"
1234         else:
1235             domain += u"-ed"
1236         if u"udir" in test_name:
1237             domain += u"-unidir"
1238         elif u"-ethip4udp-" in test_name:
1239             domain += u"-udp"
1240         elif u"-ethip4tcp-" in test_name:
1241             domain += u"-tcp"
1242         if u"-cps" in test_name:
1243             domain += u"-cps"
1244         elif u"-pps" in test_name:
1245             domain += u"-pps"
1246         elif u"-tput" in test_name:
1247             domain += u"-tput"
1248     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1249         domain = u"dpdk"
1250     elif u"memif" in test_name:
1251         domain = u"container_memif"
1252     elif u"srv6" in test_name:
1253         domain = u"srv6"
1254     elif u"vhost" in test_name:
1255         domain = u"vhost"
1256         if u"vppl2xc" in test_name:
1257             driver += u"-vpp"
1258         else:
1259             driver += u"-testpmd"
1260         if u"lbvpplacp" in test_name:
1261             bsf += u"-link-bonding"
1262     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1263         domain = u"nf_service_density_vnfc"
1264     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1265         domain = u"nf_service_density_cnfc"
1266     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1267         domain = u"nf_service_density_cnfp"
1268     elif u"ipsec" in test_name:
1269         domain = u"ipsec"
1270         if u"sw" in test_name:
1271             bsf += u"-sw"
1272         elif u"hw" in test_name:
1273             bsf += u"-hw"
1274         elif u"spe" in test_name:
1275             bsf += u"-spe"
1276     elif u"ethip4vxlan" in test_name:
1277         domain = u"ip4_tunnels"
1278     elif u"ethip4udpgeneve" in test_name:
1279         domain = u"ip4_tunnels"
1280     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1281         domain = u"ip4"
1282     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1283         domain = u"ip6"
1284     elif u"l2xcbase" in test_name or \
1285             u"l2xcscale" in test_name or \
1286             u"l2bdbasemaclrn" in test_name or \
1287             u"l2bdscale" in test_name or \
1288             u"l2patch" in test_name:
1289         domain = u"l2"
1290     else:
1291         domain = u""
1292
1293     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1294     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1295
1296     return file_name + anchor_name
1297
1298
1299 def table_perf_trending_dash_html(table, input_data):
1300     """Generate the table(s) with algorithm:
1301     table_perf_trending_dash_html specified in the specification
1302     file.
1303
1304     :param table: Table to generate.
1305     :param input_data: Data to process.
1306     :type table: dict
1307     :type input_data: InputData
1308     """
1309
1310     _ = input_data
1311
1312     if not table.get(u"testbed", None):
1313         logging.error(
1314             f"The testbed is not defined for the table "
1315             f"{table.get(u'title', u'')}. Skipping."
1316         )
1317         return
1318
1319     test_type = table.get(u"test-type", u"MRR")
1320     if test_type not in (u"MRR", u"NDR", u"PDR"):
1321         logging.error(
1322             f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
1323             f"Skipping."
1324         )
1325         return
1326
1327     if test_type in (u"NDR", u"PDR"):
1328         lnk_dir = u"../ndrpdr_trending/"
1329         lnk_sufix = f"-{test_type.lower()}"
1330     else:
1331         lnk_dir = u"../trending/"
1332         lnk_sufix = u""
1333
1334     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1335
1336     try:
1337         with open(table[u"input-file"], u'rt') as csv_file:
1338             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1339     except FileNotFoundError as err:
1340         logging.warning(f"{err}")
1341         return
1342     except KeyError:
1343         logging.warning(u"The input file is not defined.")
1344         return
1345     except csv.Error as err:
1346         logging.warning(
1347             f"Not possible to process the file {table[u'input-file']}.\n"
1348             f"{repr(err)}"
1349         )
1350         return
1351
1352     # Table:
1353     dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1354
1355     # Table header:
1356     trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1357     for idx, item in enumerate(csv_lst[0]):
1358         alignment = u"left" if idx == 0 else u"center"
1359         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
1360         thead.text = item
1361
1362     # Rows:
1363     colors = {
1364         u"regression": (
1365             u"#ffcccc",
1366             u"#ff9999"
1367         ),
1368         u"progression": (
1369             u"#c6ecc6",
1370             u"#9fdf9f"
1371         ),
1372         u"normal": (
1373             u"#e9f1fb",
1374             u"#d4e4f7"
1375         )
1376     }
1377     for r_idx, row in enumerate(csv_lst[1:]):
1378         if int(row[4]):
1379             color = u"regression"
1380         elif int(row[5]):
1381             color = u"progression"
1382         else:
1383             color = u"normal"
1384         trow = ET.SubElement(
1385             dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1386         )
1387
1388         # Columns:
1389         for c_idx, item in enumerate(row):
1390             tdata = ET.SubElement(
1391                 trow,
1392                 u"td",
1393                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
1394             )
1395             # Name:
1396             if c_idx == 0 and table.get(u"add-links", True):
1397                 ref = ET.SubElement(
1398                     tdata,
1399                     u"a",
1400                     attrib=dict(
1401                         href=f"{lnk_dir}"
1402                         f"{_generate_url(table.get(u'testbed', ''), item)}"
1403                         f"{lnk_sufix}"
1404                     )
1405                 )
1406                 ref.text = item
1407             else:
1408                 tdata.text = item
1409     try:
1410         with open(table[u"output-file"], u'w') as html_file:
1411             logging.info(f"    Writing file: {table[u'output-file']}")
1412             html_file.write(u".. raw:: html\n\n\t")
1413             html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1414             html_file.write(u"\n\t<p><br><br></p>\n")
1415     except KeyError:
1416         logging.warning(u"The output file is not defined.")
1417         return
1418
1419
1420 def table_last_failed_tests(table, input_data):
1421     """Generate the table(s) with algorithm: table_last_failed_tests
1422     specified in the specification file.
1423
1424     :param table: Table to generate.
1425     :param input_data: Data to process.
1426     :type table: pandas.Series
1427     :type input_data: InputData
1428     """
1429
1430     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1431
1432     # Transform the data
1433     logging.info(
1434         f"    Creating the data set for the {table.get(u'type', u'')} "
1435         f"{table.get(u'title', u'')}."
1436     )
1437
1438     data = input_data.filter_data(table, continue_on_error=True)
1439
1440     if data is None or data.empty:
1441         logging.warning(
1442             f"    No data for the {table.get(u'type', u'')} "
1443             f"{table.get(u'title', u'')}."
1444         )
1445         return
1446
1447     tbl_list = list()
1448     for job, builds in table[u"data"].items():
1449         for build in builds:
1450             build = str(build)
1451             try:
1452                 version = input_data.metadata(job, build).get(u"version", u"")
1453                 duration = \
1454                     input_data.metadata(job, build).get(u"elapsedtime", u"")
1455             except KeyError:
1456                 logging.error(f"Data for {job}: {build} is not present.")
1457                 return
1458             tbl_list.append(build)
1459             tbl_list.append(version)
1460             failed_tests = list()
1461             passed = 0
1462             failed = 0
1463             for tst_data in data[job][build].values:
1464                 if tst_data[u"status"] != u"FAIL":
1465                     passed += 1
1466                     continue
1467                 failed += 1
1468                 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1469                 if not groups:
1470                     continue
1471                 nic = groups.group(0)
1472                 msg = tst_data[u'msg'].replace(u"\n", u"")
1473                 msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
1474                              'xxx.xxx.xxx.xxx', msg)
1475                 msg = msg.split(u'Also teardown failed')[0]
1476                 failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
1477             tbl_list.append(passed)
1478             tbl_list.append(failed)
1479             tbl_list.append(duration)
1480             tbl_list.extend(failed_tests)
1481
1482     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1483     logging.info(f"    Writing file: {file_name}")
1484     with open(file_name, u"wt") as file_handler:
1485         for test in tbl_list:
1486             file_handler.write(f"{test}\n")
1487
1488
1489 def table_failed_tests(table, input_data):
1490     """Generate the table(s) with algorithm: table_failed_tests
1491     specified in the specification file.
1492
1493     :param table: Table to generate.
1494     :param input_data: Data to process.
1495     :type table: pandas.Series
1496     :type input_data: InputData
1497     """
1498
1499     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1500
1501     # Transform the data
1502     logging.info(
1503         f"    Creating the data set for the {table.get(u'type', u'')} "
1504         f"{table.get(u'title', u'')}."
1505     )
1506     data = input_data.filter_data(table, continue_on_error=True)
1507
1508     test_type = u"MRR"
1509     if u"NDRPDR" in table.get(u"filter", list()):
1510         test_type = u"NDRPDR"
1511
1512     # Prepare the header of the tables
1513     header = [
1514         u"Test Case",
1515         u"Failures [#]",
1516         u"Last Failure [Time]",
1517         u"Last Failure [VPP-Build-Id]",
1518         u"Last Failure [CSIT-Job-Build-Id]"
1519     ]
1520
1521     # Generate the data for the table according to the model in the table
1522     # specification
1523
1524     now = dt.utcnow()
1525     timeperiod = timedelta(int(table.get(u"window", 7)))
1526
1527     tbl_dict = dict()
1528     for job, builds in table[u"data"].items():
1529         for build in builds:
1530             build = str(build)
1531             for tst_name, tst_data in data[job][build].items():
1532                 if tst_name.lower() in table.get(u"ignore-list", list()):
1533                     continue
1534                 if tbl_dict.get(tst_name, None) is None:
1535                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1536                     if not groups:
1537                         continue
1538                     nic = groups.group(0)
1539                     tbl_dict[tst_name] = {
1540                         u"name": f"{nic}-{tst_data[u'name']}",
1541                         u"data": OrderedDict()
1542                     }
1543                 try:
1544                     generated = input_data.metadata(job, build).\
1545                         get(u"generated", u"")
1546                     if not generated:
1547                         continue
1548                     then = dt.strptime(generated, u"%Y%m%d %H:%M")
1549                     if (now - then) <= timeperiod:
1550                         tbl_dict[tst_name][u"data"][build] = (
1551                             tst_data[u"status"],
1552                             generated,
1553                             input_data.metadata(job, build).get(u"version",
1554                                                                 u""),
1555                             build
1556                         )
1557                 except (TypeError, KeyError) as err:
1558                     logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
1559
1560     max_fails = 0
1561     tbl_lst = list()
1562     for tst_data in tbl_dict.values():
1563         fails_nr = 0
1564         fails_last_date = u""
1565         fails_last_vpp = u""
1566         fails_last_csit = u""
1567         for val in tst_data[u"data"].values():
1568             if val[0] == u"FAIL":
1569                 fails_nr += 1
1570                 fails_last_date = val[1]
1571                 fails_last_vpp = val[2]
1572                 fails_last_csit = val[3]
1573         if fails_nr:
1574             max_fails = fails_nr if fails_nr > max_fails else max_fails
1575             tbl_lst.append([
1576                 tst_data[u"name"],
1577                 fails_nr,
1578                 fails_last_date,
1579                 fails_last_vpp,
1580                 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1581                 f"-build-{fails_last_csit}"
1582             ])
1583
1584     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1585     tbl_sorted = list()
1586     for nrf in range(max_fails, -1, -1):
1587         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1588         tbl_sorted.extend(tbl_fails)
1589
1590     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1591     logging.info(f"    Writing file: {file_name}")
1592     with open(file_name, u"wt") as file_handler:
1593         file_handler.write(u",".join(header) + u"\n")
1594         for test in tbl_sorted:
1595             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1596
1597     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1598     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1599
1600
1601 def table_failed_tests_html(table, input_data):
1602     """Generate the table(s) with algorithm: table_failed_tests_html
1603     specified in the specification file.
1604
1605     :param table: Table to generate.
1606     :param input_data: Data to process.
1607     :type table: pandas.Series
1608     :type input_data: InputData
1609     """
1610
1611     _ = input_data
1612
1613     if not table.get(u"testbed", None):
1614         logging.error(
1615             f"The testbed is not defined for the table "
1616             f"{table.get(u'title', u'')}. Skipping."
1617         )
1618         return
1619
1620     test_type = table.get(u"test-type", u"MRR")
1621     if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1622         logging.error(
1623             f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
1624             f"Skipping."
1625         )
1626         return
1627
1628     if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1629         lnk_dir = u"../ndrpdr_trending/"
1630         lnk_sufix = u"-pdr"
1631     else:
1632         lnk_dir = u"../trending/"
1633         lnk_sufix = u""
1634
1635     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1636
1637     try:
1638         with open(table[u"input-file"], u'rt') as csv_file:
1639             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1640     except KeyError:
1641         logging.warning(u"The input file is not defined.")
1642         return
1643     except csv.Error as err:
1644         logging.warning(
1645             f"Not possible to process the file {table[u'input-file']}.\n"
1646             f"{repr(err)}"
1647         )
1648         return
1649
1650     # Table:
1651     failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1652
1653     # Table header:
1654     trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1655     for idx, item in enumerate(csv_lst[0]):
1656         alignment = u"left" if idx == 0 else u"center"
1657         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
1658         thead.text = item
1659
1660     # Rows:
1661     colors = (u"#e9f1fb", u"#d4e4f7")
1662     for r_idx, row in enumerate(csv_lst[1:]):
1663         background = colors[r_idx % 2]
1664         trow = ET.SubElement(
1665             failed_tests, u"tr", attrib=dict(bgcolor=background)
1666         )
1667
1668         # Columns:
1669         for c_idx, item in enumerate(row):
1670             tdata = ET.SubElement(
1671                 trow,
1672                 u"td",
1673                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
1674             )
1675             # Name:
1676             if c_idx == 0 and table.get(u"add-links", True):
1677                 ref = ET.SubElement(
1678                     tdata,
1679                     u"a",
1680                     attrib=dict(
1681                         href=f"{lnk_dir}"
1682                         f"{_generate_url(table.get(u'testbed', ''), item)}"
1683                         f"{lnk_sufix}"
1684                     )
1685                 )
1686                 ref.text = item
1687             else:
1688                 tdata.text = item
1689     try:
1690         with open(table[u"output-file"], u'w') as html_file:
1691             logging.info(f"    Writing file: {table[u'output-file']}")
1692             html_file.write(u".. raw:: html\n\n\t")
1693             html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1694             html_file.write(u"\n\t<p><br><br></p>\n")
1695     except KeyError:
1696         logging.warning(u"The output file is not defined.")
1697         return
1698
1699
1700 def table_comparison(table, input_data):
1701     """Generate the table(s) with algorithm: table_comparison
1702     specified in the specification file.
1703
1704     :param table: Table to generate.
1705     :param input_data: Data to process.
1706     :type table: pandas.Series
1707     :type input_data: InputData
1708     """
1709     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1710
1711     # Transform the data
1712     logging.info(
1713         f"    Creating the data set for the {table.get(u'type', u'')} "
1714         f"{table.get(u'title', u'')}."
1715     )
1716
1717     columns = table.get(u"columns", None)
1718     if not columns:
1719         logging.error(
1720             f"No columns specified for {table.get(u'title', u'')}. Skipping."
1721         )
1722         return
1723
1724     cols = list()
1725     for idx, col in enumerate(columns):
1726         if col.get(u"data-set", None) is None:
1727             logging.warning(f"No data for column {col.get(u'title', u'')}")
1728             continue
1729         tag = col.get(u"tag", None)
1730         data = input_data.filter_data(
1731             table,
1732             params=[
1733                 u"throughput",
1734                 u"result",
1735                 u"latency",
1736                 u"name",
1737                 u"parent",
1738                 u"tags"
1739             ],
1740             data=col[u"data-set"],
1741             continue_on_error=True
1742         )
1743         col_data = {
1744             u"title": col.get(u"title", f"Column{idx}"),
1745             u"data": dict()
1746         }
1747         for builds in data.values:
1748             for build in builds:
1749                 for tst_name, tst_data in build.items():
1750                     if tag and tag not in tst_data[u"tags"]:
1751                         continue
1752                     tst_name_mod = \
1753                         _tpc_modify_test_name(tst_name, ignore_nic=True).\
1754                         replace(u"2n1l-", u"")
1755                     if col_data[u"data"].get(tst_name_mod, None) is None:
1756                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
1757                         if u"across testbeds" in table[u"title"].lower() or \
1758                                 u"across topologies" in table[u"title"].lower():
1759                             name = _tpc_modify_displayed_test_name(name)
1760                         col_data[u"data"][tst_name_mod] = {
1761                             u"name": name,
1762                             u"replace": True,
1763                             u"data": list(),
1764                             u"mean": None,
1765                             u"stdev": None
1766                         }
1767                     _tpc_insert_data(
1768                         target=col_data[u"data"][tst_name_mod],
1769                         src=tst_data,
1770                         include_tests=table[u"include-tests"]
1771                     )
1772
1773         replacement = col.get(u"data-replacement", None)
1774         if replacement:
1775             rpl_data = input_data.filter_data(
1776                 table,
1777                 params=[
1778                     u"throughput",
1779                     u"result",
1780                     u"latency",
1781                     u"name",
1782                     u"parent",
1783                     u"tags"
1784                 ],
1785                 data=replacement,
1786                 continue_on_error=True
1787             )
1788             for builds in rpl_data.values:
1789                 for build in builds:
1790                     for tst_name, tst_data in build.items():
1791                         if tag and tag not in tst_data[u"tags"]:
1792                             continue
1793                         tst_name_mod = \
1794                             _tpc_modify_test_name(tst_name, ignore_nic=True).\
1795                             replace(u"2n1l-", u"")
1796                         if col_data[u"data"].get(tst_name_mod, None) is None:
1797                             name = tst_data[u'name'].rsplit(u'-', 1)[0]
1798                             if u"across testbeds" in table[u"title"].lower() \
1799                                     or u"across topologies" in \
1800                                     table[u"title"].lower():
1801                                 name = _tpc_modify_displayed_test_name(name)
1802                             col_data[u"data"][tst_name_mod] = {
1803                                 u"name": name,
1804                                 u"replace": False,
1805                                 u"data": list(),
1806                                 u"mean": None,
1807                                 u"stdev": None
1808                             }
1809                         if col_data[u"data"][tst_name_mod][u"replace"]:
1810                             col_data[u"data"][tst_name_mod][u"replace"] = False
1811                             col_data[u"data"][tst_name_mod][u"data"] = list()
1812                         _tpc_insert_data(
1813                             target=col_data[u"data"][tst_name_mod],
1814                             src=tst_data,
1815                             include_tests=table[u"include-tests"]
1816                         )
1817
1818         if table[u"include-tests"] in (u"NDR", u"PDR") or \
1819                 u"latency" in table[u"include-tests"]:
1820             for tst_name, tst_data in col_data[u"data"].items():
1821                 if tst_data[u"data"]:
1822                     tst_data[u"mean"] = mean(tst_data[u"data"])
1823                     tst_data[u"stdev"] = stdev(tst_data[u"data"])
1824
1825         cols.append(col_data)
1826
1827     tbl_dict = dict()
1828     for col in cols:
1829         for tst_name, tst_data in col[u"data"].items():
1830             if tbl_dict.get(tst_name, None) is None:
1831                 tbl_dict[tst_name] = {
1832                     "name": tst_data[u"name"]
1833                 }
1834             tbl_dict[tst_name][col[u"title"]] = {
1835                 u"mean": tst_data[u"mean"],
1836                 u"stdev": tst_data[u"stdev"]
1837             }
1838
1839     if not tbl_dict:
1840         logging.warning(f"No data for table {table.get(u'title', u'')}!")
1841         return
1842
1843     tbl_lst = list()
1844     for tst_data in tbl_dict.values():
1845         row = [tst_data[u"name"], ]
1846         for col in cols:
1847             row.append(tst_data.get(col[u"title"], None))
1848         tbl_lst.append(row)
1849
1850     comparisons = table.get(u"comparisons", None)
1851     rcas = list()
1852     if comparisons and isinstance(comparisons, list):
1853         for idx, comp in enumerate(comparisons):
1854             try:
1855                 col_ref = int(comp[u"reference"])
1856                 col_cmp = int(comp[u"compare"])
1857             except KeyError:
1858                 logging.warning(u"Comparison: No references defined! Skipping.")
1859                 comparisons.pop(idx)
1860                 continue
1861             if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
1862                     col_ref == col_cmp):
1863                 logging.warning(f"Wrong values of reference={col_ref} "
1864                                 f"and/or compare={col_cmp}. Skipping.")
1865                 comparisons.pop(idx)
1866                 continue
1867             rca_file_name = comp.get(u"rca-file", None)
1868             if rca_file_name:
1869                 try:
1870                     with open(rca_file_name, u"r") as file_handler:
1871                         rcas.append(
1872                             {
1873                                 u"title": f"RCA{idx + 1}",
1874                                 u"data": load(file_handler, Loader=FullLoader)
1875                             }
1876                         )
1877                 except (YAMLError, IOError) as err:
1878                     logging.warning(
1879                         f"The RCA file {rca_file_name} does not exist or "
1880                         f"it is corrupted!"
1881                     )
1882                     logging.debug(repr(err))
1883                     rcas.append(None)
1884             else:
1885                 rcas.append(None)
1886     else:
1887         comparisons = None
1888
1889     tbl_cmp_lst = list()
1890     if comparisons:
1891         for row in tbl_lst:
1892             new_row = deepcopy(row)
1893             for comp in comparisons:
1894                 ref_itm = row[int(comp[u"reference"])]
1895                 if ref_itm is None and \
1896                         comp.get(u"reference-alt", None) is not None:
1897                     ref_itm = row[int(comp[u"reference-alt"])]
1898                 cmp_itm = row[int(comp[u"compare"])]
1899                 if ref_itm is not None and cmp_itm is not None and \
1900                         ref_itm[u"mean"] is not None and \
1901                         cmp_itm[u"mean"] is not None and \
1902                         ref_itm[u"stdev"] is not None and \
1903                         cmp_itm[u"stdev"] is not None:
1904                     try:
1905                         delta, d_stdev = relative_change_stdev(
1906                             ref_itm[u"mean"], cmp_itm[u"mean"],
1907                             ref_itm[u"stdev"], cmp_itm[u"stdev"]
1908                         )
1909                     except ZeroDivisionError:
1910                         break
1911                     if delta is None or math.isnan(delta):
1912                         break
1913                     new_row.append({
1914                         u"mean": delta * 1e6,
1915                         u"stdev": d_stdev * 1e6
1916                     })
1917                 else:
1918                     break
1919             else:
1920                 tbl_cmp_lst.append(new_row)
1921
1922     try:
1923         tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1924         tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1925     except TypeError as err:
1926         logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
1927
1928     tbl_for_csv = list()
1929     for line in tbl_cmp_lst:
1930         row = [line[0], ]
1931         for idx, itm in enumerate(line[1:]):
1932             if itm is None or not isinstance(itm, dict) or\
1933                     itm.get(u'mean', None) is None or \
1934                     itm.get(u'stdev', None) is None:
1935                 row.append(u"NT")
1936                 row.append(u"NT")
1937             else:
1938                 row.append(round(float(itm[u'mean']) / 1e6, 3))
1939                 row.append(round(float(itm[u'stdev']) / 1e6, 3))
1940         for rca in rcas:
1941             if rca is None:
1942                 continue
1943             rca_nr = rca[u"data"].get(row[0], u"-")
1944             row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1945         tbl_for_csv.append(row)
1946
1947     header_csv = [u"Test Case", ]
1948     for col in cols:
1949         header_csv.append(f"Avg({col[u'title']})")
1950         header_csv.append(f"Stdev({col[u'title']})")
1951     for comp in comparisons:
1952         header_csv.append(
1953             f"Avg({comp.get(u'title', u'')})"
1954         )
1955         header_csv.append(
1956             f"Stdev({comp.get(u'title', u'')})"
1957         )
1958     for rca in rcas:
1959         if rca:
1960             header_csv.append(rca[u"title"])
1961
1962     legend_lst = table.get(u"legend", None)
1963     if legend_lst is None:
1964         legend = u""
1965     else:
1966         legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1967
1968     footnote = u""
1969     if rcas and any(rcas):
1970         footnote += u"\nRoot Cause Analysis:\n"
1971         for rca in rcas:
1972             if rca:
1973                 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
1974
1975     csv_file_name = f"{table[u'output-file']}-csv.csv"
1976     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1977         file_handler.write(
1978             u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1979         )
1980         for test in tbl_for_csv:
1981             file_handler.write(
1982                 u",".join([f'"{item}"' for item in test]) + u"\n"
1983             )
1984         if legend_lst:
1985             for item in legend_lst:
1986                 file_handler.write(f'"{item}"\n')
1987         if footnote:
1988             for itm in footnote.split(u"\n"):
1989                 file_handler.write(f'"{itm}"\n')
1990
1991     tbl_tmp = list()
1992     max_lens = [0, ] * len(tbl_cmp_lst[0])
1993     for line in tbl_cmp_lst:
1994         row = [line[0], ]
1995         for idx, itm in enumerate(line[1:]):
1996             if itm is None or not isinstance(itm, dict) or \
1997                     itm.get(u'mean', None) is None or \
1998                     itm.get(u'stdev', None) is None:
1999                 new_itm = u"NT"
2000             else:
2001                 if idx < len(cols):
2002                     new_itm = (
2003                         f"{round(float(itm[u'mean']) / 1e6, 2)} "
2004                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
2005                         replace(u"nan", u"NaN")
2006                     )
2007                 else:
2008                     new_itm = (
2009                         f"{round(float(itm[u'mean']) / 1e6, 2):+} "
2010                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
2011                         replace(u"nan", u"NaN")
2012                     )
2013             if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
2014                 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
2015             row.append(new_itm)
2016
2017         tbl_tmp.append(row)
2018
2019     header = [u"Test Case", ]
2020     header.extend([col[u"title"] for col in cols])
2021     header.extend([comp.get(u"title", u"") for comp in comparisons])
2022
2023     tbl_final = list()
2024     for line in tbl_tmp:
2025         row = [line[0], ]
2026         for idx, itm in enumerate(line[1:]):
2027             if itm in (u"NT", u"NaN"):
2028                 row.append(itm)
2029                 continue
2030             itm_lst = itm.rsplit(u"\u00B1", 1)
2031             itm_lst[-1] = \
2032                 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
2033             itm_str = u"\u00B1".join(itm_lst)
2034
2035             if idx >= len(cols):
2036                 # Diffs
2037                 rca = rcas[idx - len(cols)]
2038                 if rca:
2039                     # Add rcas to diffs
2040                     rca_nr = rca[u"data"].get(row[0], None)
2041                     if rca_nr:
2042                         hdr_len = len(header[idx + 1]) - 1
2043                         if hdr_len < 19:
2044                             hdr_len = 19
2045                         rca_nr = f"[{rca_nr}]"
2046                         itm_str = (
2047                             f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
2048                             f"{u' ' * (hdr_len - 4 - len(itm_str))}"
2049                             f"{itm_str}"
2050                         )
2051             row.append(itm_str)
2052         tbl_final.append(row)
2053
2054     # Generate csv tables:
2055     csv_file_name = f"{table[u'output-file']}.csv"
2056     logging.info(f"    Writing the file {csv_file_name}")
2057     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2058         file_handler.write(u";".join(header) + u"\n")
2059         for test in tbl_final:
2060             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2061
2062     # Generate txt table:
2063     txt_file_name = f"{table[u'output-file']}.txt"
2064     logging.info(f"    Writing the file {txt_file_name}")
2065     convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
2066
2067     with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
2068         file_handler.write(legend)
2069         file_handler.write(footnote)
2070
2071     # Generate html table:
2072     _tpc_generate_html_table(
2073         header,
2074         tbl_final,
2075         table[u'output-file'],
2076         legend=legend,
2077         footnote=footnote,
2078         sort_data=False,
2079         title=table.get(u"title", u"")
2080     )
2081
2082
2083 def table_weekly_comparison(table, in_data):
2084     """Generate the table(s) with algorithm: table_weekly_comparison
2085     specified in the specification file.
2086
2087     :param table: Table to generate.
2088     :param in_data: Data to process.
2089     :type table: pandas.Series
2090     :type in_data: InputData
2091     """
2092     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2093
2094     # Transform the data
2095     logging.info(
2096         f"    Creating the data set for the {table.get(u'type', u'')} "
2097         f"{table.get(u'title', u'')}."
2098     )
2099
2100     incl_tests = table.get(u"include-tests", None)
2101     if incl_tests not in (u"NDR", u"PDR"):
2102         logging.error(f"Wrong tests to include specified ({incl_tests}).")
2103         return
2104
2105     nr_cols = table.get(u"nr-of-data-columns", None)
2106     if not nr_cols or nr_cols < 2:
2107         logging.error(
2108             f"No columns specified for {table.get(u'title', u'')}. Skipping."
2109         )
2110         return
2111
2112     data = in_data.filter_data(
2113         table,
2114         params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2115         continue_on_error=True
2116     )
2117
2118     header = [
2119         [u"VPP Version", ],
2120         [u"Start Timestamp", ],
2121         [u"CSIT Build", ],
2122         [u"CSIT Testbed", ]
2123     ]
2124     tbl_dict = dict()
2125     idx = 0
2126     tb_tbl = table.get(u"testbeds", None)
2127     for job_name, job_data in data.items():
2128         for build_nr, build in job_data.items():
2129             if idx >= nr_cols:
2130                 break
2131             if build.empty:
2132                 continue
2133
2134             tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2135             if tb_ip and tb_tbl:
2136                 testbed = tb_tbl.get(tb_ip, u"")
2137             else:
2138                 testbed = u""
2139             header[2].insert(1, build_nr)
2140             header[3].insert(1, testbed)
2141             header[1].insert(
2142                 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2143             )
2144             header[0].insert(
2145                 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2146             )
2147
2148             for tst_name, tst_data in build.items():
2149                 tst_name_mod = \
2150                     _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2151                 if not tbl_dict.get(tst_name_mod, None):
2152                     tbl_dict[tst_name_mod] = dict(
2153                         name=tst_data[u'name'].rsplit(u'-', 1)[0],
2154                     )
2155                 try:
2156                     tbl_dict[tst_name_mod][-idx - 1] = \
2157                         tst_data[u"throughput"][incl_tests][u"LOWER"]
2158                 except (TypeError, IndexError, KeyError, ValueError):
2159                     pass
2160             idx += 1
2161
2162     if idx < nr_cols:
2163         logging.error(u"Not enough data to build the table! Skipping")
2164         return
2165
2166     cmp_dict = dict()
2167     for idx, cmp in enumerate(table.get(u"comparisons", list())):
2168         idx_ref = cmp.get(u"reference", None)
2169         idx_cmp = cmp.get(u"compare", None)
2170         if idx_ref is None or idx_cmp is None:
2171             continue
2172         header[0].append(
2173             f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2174             f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2175         )
2176         header[1].append(u"")
2177         header[2].append(u"")
2178         header[3].append(u"")
2179         for tst_name, tst_data in tbl_dict.items():
2180             if not cmp_dict.get(tst_name, None):
2181                 cmp_dict[tst_name] = list()
2182             ref_data = tst_data.get(idx_ref, None)
2183             cmp_data = tst_data.get(idx_cmp, None)
2184             if ref_data is None or cmp_data is None:
2185                 cmp_dict[tst_name].append(float(u'nan'))
2186             else:
2187                 cmp_dict[tst_name].append(
2188                     relative_change(ref_data, cmp_data)
2189                 )
2190
2191     tbl_lst_none = list()
2192     tbl_lst = list()
2193     for tst_name, tst_data in tbl_dict.items():
2194         itm_lst = [tst_data[u"name"], ]
2195         for idx in range(nr_cols):
2196             item = tst_data.get(-idx - 1, None)
2197             if item is None:
2198                 itm_lst.insert(1, None)
2199             else:
2200                 itm_lst.insert(1, round(item / 1e6, 1))
2201         itm_lst.extend(
2202             [
2203                 None if itm is None else round(itm, 1)
2204                 for itm in cmp_dict[tst_name]
2205             ]
2206         )
2207         if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2208             tbl_lst_none.append(itm_lst)
2209         else:
2210             tbl_lst.append(itm_lst)
2211
2212     tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2213     tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2214     tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2215     tbl_lst.extend(tbl_lst_none)
2216
2217     # Generate csv table:
2218     csv_file_name = f"{table[u'output-file']}.csv"
2219     logging.info(f"    Writing the file {csv_file_name}")
2220     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2221         for hdr in header:
2222             file_handler.write(u",".join(hdr) + u"\n")
2223         for test in tbl_lst:
2224             file_handler.write(u",".join(
2225                 [
2226                     str(item).replace(u"None", u"-").replace(u"nan", u"-").
2227                     replace(u"null", u"-") for item in test
2228                 ]
2229             ) + u"\n")
2230
2231     txt_file_name = f"{table[u'output-file']}.txt"
2232     logging.info(f"    Writing the file {txt_file_name}")
2233     convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2234
2235     # Reorganize header in txt table
2236     txt_table = list()
2237     with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2238         for line in list(file_handler):
2239             txt_table.append(line)
2240     try:
2241         txt_table.insert(5, txt_table.pop(2))
2242         with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2243             file_handler.writelines(txt_table)
2244     except IndexError:
2245         pass
2246
2247     # Generate html table:
2248     hdr_html = [
2249         u"<br>".join(row) for row in zip(*header)
2250     ]
2251     _tpc_generate_html_table(
2252         hdr_html,
2253         tbl_lst,
2254         table[u'output-file'],
2255         sort_data=True,
2256         title=table.get(u"title", u""),
2257         generate_rst=False
2258     )