PAL: Add hoststack and vsap to comp tables
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2022 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import math
21 import re
22
23 from collections import OrderedDict
24 from xml.etree import ElementTree as ET
25 from datetime import datetime as dt
26 from datetime import timedelta
27 from copy import deepcopy
28
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
31 import pandas as pd
32 import prettytable
33
34 from numpy import nan, isnan
35 from yaml import load, FullLoader, YAMLError
36
37 from pal_utils import mean, stdev, classify_anomalies, \
38     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
39
40
41 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
42
43
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Dispatches each table in the specification to the generator function
    named by its "algorithm" field. Errors in dispatch are logged and the
    remaining tables are still processed.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    generator = {
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison,
        u"table_job_spec_duration": table_job_spec_duration
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            if table[u"algorithm"] == u"table_weekly_comparison":
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            # An algorithm missing from the dict above raises KeyError on
            # the lookup (not NameError), so catch both to really skip
            # unknown algorithms instead of crashing.
            generator[table[u"algorithm"]](table, data)
        except (NameError, KeyError) as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
79
80
def table_job_spec_duration(table, input_data):
    """Generate the table(s) with algorithm: table_job_spec_duration
    specified in the specification file.

    Produces a text table with the mean (and, for iterative jobs, the
    standard deviation of the) run duration per job specification.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    jb_type = table.get(u"jb-type", None)

    tbl_lst = list()
    if jb_type == u"iterative":
        # Iterative jobs: one row per job-spec, duration aggregated over
        # all builds listed in the data set.
        for line in table.get(u"lines", tuple()):
            tbl_itm = {
                u"name": line.get(u"job-spec", u""),
                u"data": list()
            }
            for job, builds in line.get(u"data-set", dict()).items():
                for build_nr in builds:
                    try:
                        # "elapsedtime" is in milliseconds; convert to
                        # whole minutes.
                        minutes = input_data.metadata(
                            job, str(build_nr)
                        )[u"elapsedtime"] // 60000
                    except (KeyError, IndexError, ValueError, AttributeError):
                        continue
                    tbl_itm[u"data"].append(minutes)
            tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
            tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
            tbl_lst.append(tbl_itm)
    elif jb_type == u"coverage":
        # Coverage jobs: a single build per line, so there is no stdev.
        job = table.get(u"data", None)
        if not job:
            return
        for line in table.get(u"lines", tuple()):
            try:
                tbl_itm = {
                    u"name": line.get(u"job-spec", u""),
                    u"mean": input_data.metadata(
                        list(job.keys())[0], str(line[u"build"])
                    )[u"elapsedtime"] // 60000,
                    u"stdev": float(u"nan")
                }
                tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
            except (KeyError, IndexError, ValueError, AttributeError):
                continue
            tbl_lst.append(tbl_itm)
    else:
        logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
        return

    # Convert minutes to "HH:MM" strings; NaN stdev (coverage) renders as
    # an empty string.
    for line in tbl_lst:
        line[u"mean"] = \
            f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
        if math.isnan(line[u"stdev"]):
            line[u"stdev"] = u""
        else:
            line[u"stdev"] = \
                f"{int(line[u'stdev'] // 60):02d}:{int(line[u'stdev'] % 60):02d}"

    if not tbl_lst:
        return

    rows = list()
    for itm in tbl_lst:
        rows.append([
            itm[u"name"],
            f"{len(itm[u'data'])}",
            f"{itm[u'mean']} +- {itm[u'stdev']}"
            if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
        ])

    txt_table = prettytable.PrettyTable(
        [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
    )
    for row in rows:
        txt_table.add_row(row)
    txt_table.align = u"r"
    txt_table.align[u"Job Specification"] = u"l"

    file_name = f"{table.get(u'output-file', u'')}.txt"
    with open(file_name, u"wt", encoding='utf-8') as txt_file:
        txt_file.write(str(txt_table))
169
170
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    For every suite, an .rst file is written embedding raw HTML tables
    with per-thread VPP runtime (show-run telemetry) data of its tests.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    # Keep only the fields needed to render the runtime tables.
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"telemetry-show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optional sorting of tests, driven by the specification ("ascending"
    # or anything else for descending).
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Header row colour, spacer-row colour and the two alternating
        # body-row shades.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row: the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # No telemetry at all, or an error string instead of the expected
        # per-DUT dict: emit a "No Data" placeholder and return early.
        if tst_data.get(u"telemetry-show-run", None) is None or \
                isinstance(tst_data[u"telemetry-show-run"], str):
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            # Closing spacer with an (invisible) white dot so the row is
            # not collapsed by the renderer.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column headers of the per-thread runtime tables.
        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        for dut_data in tst_data[u"telemetry-show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"runtime", None) is None:
                tcol.text = u"No Data"
                continue

            # Re-group the flat telemetry samples into
            # runtime[thread_id][graph_node][metric_name] = value.
            runtime = dict()
            for item in dut_data[u"runtime"].get(u"data", tuple()):
                tid = int(item[u"labels"][u"thread_id"])
                if runtime.get(tid, None) is None:
                    runtime[tid] = dict()
                gnode = item[u"labels"][u"graph_node"]
                if runtime[tid].get(gnode, None) is None:
                    runtime[tid][gnode] = dict()
                try:
                    runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
                except ValueError:
                    # Non-numeric values are kept as-is.
                    runtime[tid][gnode][item[u"name"]] = item[u"value"]

            # One row list per thread; assumes thread ids are contiguous
            # from 0 (main thread) -- TODO confirm against telemetry source.
            threads = dict({idx: list() for idx in range(len(runtime))})
            for idx, run_data in runtime.items():
                for gnode, gdata in run_data.items():
                    threads[idx].append([
                        gnode,
                        int(gdata[u"calls"]),
                        int(gdata[u"vectors"]),
                        int(gdata[u"suspends"]),
                        float(gdata[u"clocks"]),
                        # Average vector size; guard against zero calls.
                        float(gdata[u"vectors"] / gdata[u"calls"]) \
                            if gdata[u"calls"] else 0.0
                    ])

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in threads.items():
                # Thread title row: "main" or "worker_N".
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                # Column-header row.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # Data rows with alternating shading.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                # Spacer row after each thread's table.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Closing spacer with an (invisible) white dot.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # One .rst file per suite, concatenating the HTML tables of all its
    # tests (matched via the test's "parent" field).
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
400
401
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Writes one CSV file per suite with the columns configured in the
    specification, for passing tests only.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of tests, driven by the specification.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; values are CSV-quoted with
    # embedded double quotes doubled.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"status"] != u"PASS" or \
                    data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column["data"] is e.g. "data name"; the second word
                    # is the test-data key to read.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Break long names in half for readability.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u"\n", 1)[1]
                            except IndexError:
                                pass
                        col_data = col_data.replace(u'\n', u' |br| ').\
                            replace(u'\r', u'').replace(u'"', u"'")
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
                        col_data = col_data.replace(u'\n', u' |br| ')
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            # Explicit encoding: test messages may contain non-ASCII text
            # and the default encoding is platform-dependent.
            with open(file_name, u"wt", encoding='utf-8') as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
498
499
def _tpc_modify_test_name(test_name, ignore_nic=False):
    """Modify a test name by replacing its parts.

    Strips the "-ndrpdr" suffix and normalizes thread/core tags
    (e.g. "2t1c" -> "1c"); optionally removes the NIC part.

    :param test_name: Test name to be modified.
    :param ignore_nic: If True, NIC is removed from TC name.
    :type test_name: str
    :type ignore_nic: bool
    :returns: Modified test name.
    :rtype: str
    """
    # Order matters: drop the suffix first, then normalize core tags.
    replacements = (
        (u"-ndrpdr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c")
    )
    test_name_mod = test_name
    for old, new in replacements:
        test_name_mod = test_name_mod.replace(old, new)

    if ignore_nic:
        return re.sub(REGEX_NIC, u"", test_name_mod)
    return test_name_mod
522
523
def _tpc_modify_displayed_test_name(test_name):
    """Modify a test name which is displayed in a table by replacing its parts.

    Normalizes thread/core tags (e.g. "2t1c" -> "1c") but keeps the rest
    of the name, including any "-ndrpdr" suffix, intact.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    name = test_name
    for old, new in (
            (u"1t1c", u"1c"),
            (u"2t1c", u"1c"),
            (u"2t2c", u"2c"),
            (u"4t2c", u"2c"),
            (u"4t4c", u"4c"),
            (u"8t4c", u"4c")):
        name = name.replace(old, new)
    return name
539
540
def _tpc_insert_data(target, src, include_tests):
    """Insert src data to the target structure.

    The field(s) read from src depend on include_tests; missing or
    malformed source data is silently ignored.

    :param target: Target structure where the data is placed.
    :param src: Source data to be placed into the target structure.
    :param include_tests: Which results will be included (MRR, NDR, PDR).
    :type target: list
    :type src: dict
    :type include_tests: str
    """
    try:
        if include_tests == u"MRR":
            # MRR stores mean/stdev directly instead of appending samples.
            target[u"mean"] = src[u"result"][u"receive-rate"]
            target[u"stdev"] = src[u"result"][u"receive-stdev"]
        elif include_tests in (u"PDR", u"NDR"):
            target[u"data"].append(
                src[u"throughput"][include_tests][u"LOWER"]
            )
        elif u"latency" in include_tests:
            # include_tests encodes the lookup path, e.g.
            # "latency-forward-PDR50-avg".
            keys = include_tests.split(u"-")
            if len(keys) == 4:
                lat = src
                for key in keys:
                    lat = lat[key]
                # -1 marks a failed latency measurement; report it as NaN.
                target[u"data"].append(
                    float(u"nan") if lat == -1 else lat * 1e6
                )
        elif include_tests == u"hoststack":
            result = src[u"result"]
            if u"bits_per_second" in result:
                target[u"data"].append(float(result[u"bits_per_second"]))
            else:
                # Derive goodput [bps] from transferred bytes and the mean
                # of client/server durations.
                duration = (
                    float(result[u"client"][u"time"]) +
                    float(result[u"server"][u"time"])
                ) / 2
                target[u"data"].append(
                    float(result[u"client"][u"tx_data"]) * 8 / duration
                )
        elif include_tests == u"vsap":
            result = src[u"result"]
            target[u"data"].append(
                result[u"cps"] if u"cps" in result else result[u"rps"]
            )
    except (KeyError, TypeError):
        pass
584
585
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    # Index of the "Test Case" column; used as the secondary sort key and
    # to pick alignment/width presets below. Falls back to 0.
    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    # Alignment and column-width presets, selected by idx (i.e. by how many
    # columns precede "Test Case").
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        # One sorted copy per column, ascending then descending; each copy
        # becomes one (initially hidden) trace selectable from the dropdown.
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    # Alternating row shading. NOTE(review): "idx" is reused as the loop
    # variable here and in the dropdown loop below; the header-index value
    # is only needed again in the non-sorting branch, which this does not
    # affect.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        # Add one table trace per sorted copy.
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        # Dropdown menu: each entry makes exactly one trace visible.
        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    # Default selection: last item (last column, descending).
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        # Single, unsorted table trace.
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    # Write the wrapping .rst file which embeds the HTML table in an iframe.
    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )

        # Legend and footnote are rendered as a paragraph followed by a
        # bullet list (first line is the paragraph, the rest are bullets).
        if legend:
            try:
                itm_lst = legend[1:-2].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
        if footnote:
            try:
                itm_lst = footnote[1:].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")
772
773
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Compares SOAK test results (compare set) against NDR/PDR/MRR results
    (reference set) and writes the comparison as csv, txt and html tables.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test Case",
            f"Avg({table[u'reference'][u'title']})",
            f"Stdev({table[u'reference'][u'title']})",
            f"Avg({table[u'compare'][u'title']})",
            # Fixed: the opening parenthesis was missing here, producing
            # e.g. "Stdevsoak)" instead of "Stdev(soak)" in the header.
            f"Stdev({table[u'compare'][u'title']})",
            u"Diff",
            u"Stdev(Diff)"
        ]
        header_str = u";".join(header) + u"\n"
        legend = (
            u"\nLegend:\n"
            f"Avg({table[u'reference'][u'title']}): "
            f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
            f"from a series of runs of the listed tests.\n"
            f"Stdev({table[u'reference'][u'title']}): "
            f"Standard deviation value of {table[u'reference'][u'title']} "
            f"[Mpps] computed from a series of runs of the listed tests.\n"
            f"Avg({table[u'compare'][u'title']}): "
            f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
            f"a series of runs of the listed tests.\n"
            f"Stdev({table[u'compare'][u'title']}): "
            f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
            f"computed from a series of runs of the listed tests.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Stdev(Diff): "
            u"Standard deviation of percentage change calculated for mean "
            u"values."
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        # No throughput for this run; skip it silently.
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                # Only tests with a SOAK counterpart are of interest.
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        # MRR result is a (mean, stdev) pair.
                        result = (tst_data[u"result"][u"receive-rate"],
                                  tst_data[u"result"][u"receive-stdev"])
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    # Compute mean / stdev / relative difference for each test:
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # Only rows with both reference and compare data end up in the table.
        if data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            try:
                item.append(round(delta))
            except ValueError:
                # round() fails on NaN; keep the raw value.
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    with open(csv_file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(
        csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
    )
    with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
        file_handler.write(legend)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_lst,
        table[u'output-file'],
        legend=legend,
        title=table.get(u"title", u"")
    )
952
953
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    Builds a csv/txt dashboard with the trend, number of runs at the last
    trend value, long-term change and the counts of regressions and
    progressions detected within the configured window.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Runs [#]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to compute a trend.
        if len(data_t) < 2:
            continue

        try:
            classification_lst, avgs, _ = classify_anomalies(data_t)
        except ValueError as err:
            # NOTE(review): this aborts the whole table on the first test
            # that fails classification; the u"Skipping" message suggests
            # u"continue" may have been intended - confirm.
            logging.info(f"{err} Skipping")
            return

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # Empty slice or all-NaN: no long-term maximum available.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Count trailing samples sharing the last trend value.
        # Fixed: removed a stray C-style semicolon after the initializer.
        nr_of_last_avgs = 0
        for x in reversed(avgs):
            if x == last_avg:
                nr_of_last_avgs += 1
            else:
                break

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            # Skip tests with incomplete trending data. (A former extra
            # check for both relative changes being NaN was fully subsumed
            # by this condition and has been removed.)
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 nr_of_last_avgs,
                 rel_change_long,
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

    # Stable sorts, applied least-significant-key first: the final order is
    # regressions desc, progressions desc, long-term change asc, runs asc,
    # name asc.
    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[2])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[5], reverse=True)
    tbl_lst.sort(key=lambda rel: rel[4], reverse=True)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1089
1090
def _generate_url(testbed, test_name):
    """Generate URL to a trending plot from the name of the test case.

    The URL is composed of a file part (domain, testbed, NIC) and an anchor
    part (frame size, cores, base/scale/feature tag, driver), all derived
    from substrings of the test name and the testbed name.

    :param testbed: The testbed used for testing.
    :param test_name: The name of the test case.
    :type testbed: str
    :type test_name: str
    :returns: The URL to the plot with the trending data for the given test
        case.
    :rtype str
    """

    # NIC: the first matching substring wins. u"cx556" also matches names
    # containing u"cx556a", so no separate check for the latter is needed.
    nic = next(
        (
            out for key, out in (
                (u"x520", u"x520"),
                (u"x710", u"x710"),
                (u"xl710", u"xl710"),
                (u"xxv710", u"xxv710"),
                (u"vic1227", u"vic1227"),
                (u"vic1385", u"vic1385"),
                (u"x553", u"x553"),
                (u"cx556", u"cx556a"),
                (u"ena", u"nitro50g")
            ) if key in test_name
        ),
        u""
    )

    # Frame size: the first matching substring wins.
    frame_size = next(
        (
            itm for itm in
            (u"64b", u"78b", u"imix", u"9000b", u"1518b", u"114b")
            if itm in test_name
        ),
        u""
    )

    # Testbeds where a generic u"-Nc-" tag maps to NtNc core naming, vs
    # testbeds where it maps to 2NtNc. Hoisted here because the original
    # repeated each tuple in every branch of the chain below.
    tbeds_1t_per_core = (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")
    tbeds_2t_per_core = (
        u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
        u"2n-aws", u"3n-aws"
    )
    if u"1t1c" in test_name or \
            (u"-1c-" in test_name and testbed in tbeds_1t_per_core):
        cores = u"1t1c"
    elif u"2t2c" in test_name or \
            (u"-2c-" in test_name and testbed in tbeds_1t_per_core):
        cores = u"2t2c"
    elif u"4t4c" in test_name or \
            (u"-4c-" in test_name and testbed in tbeds_1t_per_core):
        cores = u"4t4c"
    elif u"2t1c" in test_name or \
            (u"-1c-" in test_name and testbed in tbeds_2t_per_core):
        cores = u"2t1c"
    elif u"4t2c" in test_name or \
            (u"-2c-" in test_name and testbed in tbeds_2t_per_core):
        cores = u"4t2c"
    elif u"8t4c" in test_name or \
            (u"-4c-" in test_name and testbed in tbeds_2t_per_core):
        cores = u"8t4c"
    else:
        cores = u""

    # Driver: taken from the test name; ixgbe is implied by the testbed.
    if u"testpmd" in test_name:
        driver = u"testpmd"
    elif u"l3fwd" in test_name:
        driver = u"l3fwd"
    elif u"avf" in test_name:
        driver = u"avf"
    elif u"af-xdp" in test_name or u"af_xdp" in test_name:
        driver = u"af_xdp"
    elif u"rdma" in test_name:
        driver = u"rdma"
    elif u"dnv" in testbed or u"tsh" in testbed:
        driver = u"ixgbe"
    elif u"ena" in test_name:
        driver = u"ena"
    else:
        driver = u"dpdk"

    # Base/scale/feature tag: most specific substrings are checked first.
    if u"macip-iacl1s" in test_name:
        bsf = u"features-macip-iacl1"
    elif u"macip-iacl10s" in test_name:
        bsf = u"features-macip-iacl10"
    elif u"macip-iacl50s" in test_name:
        bsf = u"features-macip-iacl50"
    elif u"iacl1s" in test_name:
        bsf = u"features-iacl1"
    elif u"iacl10s" in test_name:
        bsf = u"features-iacl10"
    elif u"iacl50s" in test_name:
        bsf = u"features-iacl50"
    elif u"oacl1s" in test_name:
        bsf = u"features-oacl1"
    elif u"oacl10s" in test_name:
        bsf = u"features-oacl10"
    elif u"oacl50s" in test_name:
        bsf = u"features-oacl50"
    elif u"nat44det" in test_name:
        bsf = u"nat44det-bidir"
    elif u"nat44ed" in test_name and u"udir" in test_name:
        bsf = u"nat44ed-udir"
    elif u"-cps" in test_name and u"ethip4udp" in test_name:
        bsf = u"udp-cps"
    elif u"-cps" in test_name and u"ethip4tcp" in test_name:
        bsf = u"tcp-cps"
    elif u"-pps" in test_name and u"ethip4udp" in test_name:
        bsf = u"udp-pps"
    elif u"-pps" in test_name and u"ethip4tcp" in test_name:
        bsf = u"tcp-pps"
    elif u"-tput" in test_name and u"ethip4udp" in test_name:
        bsf = u"udp-tput"
    elif u"-tput" in test_name and u"ethip4tcp" in test_name:
        bsf = u"tcp-tput"
    elif u"udpsrcscale" in test_name:
        bsf = u"features-udp"
    elif u"iacl" in test_name:
        bsf = u"features"
    elif u"policer" in test_name:
        bsf = u"features"
    elif u"adl" in test_name:
        bsf = u"features"
    elif u"cop" in test_name:
        bsf = u"features"
    elif u"nat" in test_name:
        bsf = u"features"
    elif u"macip" in test_name:
        bsf = u"features"
    elif u"scale" in test_name:
        bsf = u"scale"
    elif u"base" in test_name:
        bsf = u"base"
    else:
        bsf = u"base"

    # Domain: some branches also refine the driver/bsf chosen above.
    if u"114b" in test_name and u"vhost" in test_name:
        domain = u"vts"
    elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
        domain = u"nat44"
        if u"nat44det" in test_name:
            domain += u"-det-bidir"
        else:
            domain += u"-ed"
        if u"udir" in test_name:
            domain += u"-unidir"
        elif u"-ethip4udp-" in test_name:
            domain += u"-udp"
        elif u"-ethip4tcp-" in test_name:
            domain += u"-tcp"
        if u"-cps" in test_name:
            domain += u"-cps"
        elif u"-pps" in test_name:
            domain += u"-pps"
        elif u"-tput" in test_name:
            domain += u"-tput"
    elif u"testpmd" in test_name or u"l3fwd" in test_name:
        domain = u"dpdk"
    elif u"memif" in test_name:
        domain = u"container_memif"
    elif u"srv6" in test_name:
        domain = u"srv6"
    elif u"vhost" in test_name:
        domain = u"vhost"
        if u"vppl2xc" in test_name:
            driver += u"-vpp"
        else:
            driver += u"-testpmd"
        if u"lbvpplacp" in test_name:
            bsf += u"-link-bonding"
    elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
        domain = u"nf_service_density_vnfc"
    elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfc"
    elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfp"
    elif u"ipsec" in test_name:
        domain = u"ipsec"
        if u"sw" in test_name:
            bsf += u"-sw"
        elif u"hw" in test_name:
            bsf += u"-hw"
        elif u"spe" in test_name:
            bsf += u"-spe"
    elif u"ethip4vxlan" in test_name:
        domain = u"ip4_tunnels"
    elif u"ethip4udpgeneve" in test_name:
        domain = u"ip4_tunnels"
    elif u"ip4base" in test_name or u"ip4scale" in test_name:
        domain = u"ip4"
    elif u"ip6base" in test_name or u"ip6scale" in test_name:
        domain = u"ip6"
    elif u"l2xcbase" in test_name or \
            u"l2xcscale" in test_name or \
            u"l2bdbasemaclrn" in test_name or \
            u"l2bdscale" in test_name or \
            u"l2patch" in test_name:
        domain = u"l2"
    else:
        domain = u""

    file_name = u"-".join((domain, testbed, nic)) + u".html#"
    anchor_name = u"-".join((frame_size, cores, bsf, driver))

    return file_name + anchor_name
1313
1314
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # NDR/PDR dashboards link into the ndrpdr trending pages with a
    # test-type suffix; MRR links into the plain trending pages.
    if test_type in (u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = f"-{test_type.lower()}"
    else:
        lnk_dir = u"../trending/"
        lnk_sufix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except FileNotFoundError as err:
        logging.warning(f"{err}")
        return
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Root element of the generated table:
    root = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row; first column left-aligned, the rest centered:
    head_row = ET.SubElement(root, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col, text in enumerate(csv_lst[0]):
        cell = ET.SubElement(
            head_row,
            u"th",
            attrib=dict(align=u"left" if col == 0 else u"center")
        )
        cell.text = text

    # Two alternating background shades per result category:
    shades = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for r_idx, row in enumerate(csv_lst[1:]):
        # Regressions take precedence over progressions when coloring.
        if int(row[4]):
            category = u"regression"
        elif int(row[5]):
            category = u"progression"
        else:
            category = u"normal"
        body_row = ET.SubElement(
            root, u"tr", attrib=dict(bgcolor=shades[category][r_idx % 2])
        )

        for c_idx, value in enumerate(row):
            cell = ET.SubElement(
                body_row,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # The test-name column optionally becomes a trending-plot link:
            if c_idx == 0 and table.get(u"add-links", True):
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), value)}"
                        f"{lnk_sufix}"
                    )
                )
                link.text = value
            else:
                cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(root, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1434
1435
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                meta = input_data.metadata(job, build)
                version = meta.get(u"version", u"")
                duration = meta.get(u"elapsedtime", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            tbl_list.extend((build, version))
            failed_tests = list()
            passed = failed = 0
            # Note: .values is a pandas attribute, not a method call.
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                if not groups:
                    continue
                # Anonymize IPv4 addresses and drop teardown noise from
                # the failure message before recording it.
                msg = re.sub(
                    r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
                    'xxx.xxx.xxx.xxx',
                    tst_data[u'msg'].replace(u"\n", u"")
                ).split(u'Also teardown failed')[0]
                failed_tests.append(
                    f"{groups.group(0)}-{tst_data[u'name']}###{msg}"
                )
            tbl_list.extend((passed, failed, duration))
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(f"{test}\n" for test in tbl_list)
1503
1504
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    test_type = \
        u"NDRPDR" if u"NDRPDR" in table.get(u"filter", list()) else u"MRR"

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Only builds generated within this time window are considered.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    tbl_dict[tst_name] = {
                        u"name": f"{groups.group(0)}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    # Aggregate per-test failure counts and remember the newest failure.
    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = fails_last_vpp = fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date, fails_last_vpp, fails_last_csit = val[1:]
        if fails_nr:
            max_fails = max(max_fails, fails_nr)
            tbl_lst.append([
                tst_data[u"name"],
                fails_nr,
                fails_last_date,
                fails_last_vpp,
                f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
                f"-build-{fails_last_csit}"
            ])

    # Newest failures first, then grouped by descending failure count.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_sorted.extend([item for item in tbl_lst if item[1] == nrf])

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1616
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV produced by the companion "failed tests" table and renders
    it as an HTML table (raw-html reST block), linking the test-name column
    to the corresponding trending graph.

    :param table: Table to generate.
    :param input_data: Data to process (unused; kept for API uniformity).
    :type table: pandas.Series
    :type input_data: InputData
    """
    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # Select the target trending pages; test_type is already validated, so
    # anything that is not MRR is one of the NDR/PDR variants.
    if test_type == u"MRR":
        lnk_dir = u"../trending/"
        lnk_sufix = u""
    else:
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = u"-pdr"

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The whole HTML table is assembled as an ElementTree.
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row (first CSV line):
    hdr_row = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, label in enumerate(csv_lst[0]):
        cell = ET.SubElement(
            hdr_row,
            u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        cell.text = label

    # Data rows with alternating background colors:
    colors = (u"#e9f1fb", u"#d4e4f7")
    add_links = table.get(u"add-links", True)
    for row_nr, csv_row in enumerate(csv_lst[1:]):
        data_row = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=colors[row_nr % 2])
        )
        for col_nr, value in enumerate(csv_row):
            cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0 and add_links:
                # First column is the test name; wrap it in a link to its
                # trending graph.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), value)}"
                        f"{lnk_sufix}"
                    )
                )
                link.text = value
            else:
                cell.text = value

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1714
1715
def table_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_comparison
    specified in the specification file.

    Collects per-column test results (optionally replacing them from a
    "data-replacement" data set), computes mean/stdev per test, evaluates the
    configured comparisons (relative change and its stdev), optionally
    annotates rows with RCA references, and writes the result as csv, txt and
    html tables.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    columns = table.get(u"columns", None)
    if not columns:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    # Gather the data for each configured column.
    cols = list()
    for idx, col in enumerate(columns):
        if col.get(u"data-set", None) is None:
            logging.warning(f"No data for column {col.get(u'title', u'')}")
            continue
        tag = col.get(u"tag", None)
        data = input_data.filter_data(
            table,
            params=[
                u"throughput",
                u"result",
                u"latency",
                u"name",
                u"parent",
                u"tags"
            ],
            data=col[u"data-set"],
            continue_on_error=True
        )
        col_data = {
            u"title": col.get(u"title", f"Column{idx}"),
            u"data": dict()
        }
        for builds in data.values:
            for build in builds:
                for tst_name, tst_data in build.items():
                    # Optional tag filter for this column.
                    if tag and tag not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = \
                        _tpc_modify_test_name(tst_name, ignore_nic=True).\
                        replace(u"2n1l-", u"")
                    if col_data[u"data"].get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        col_data[u"data"][tst_name_mod] = {
                            u"name": name,
                            # u"replace" marks entries that may be overwritten
                            # by the data-replacement pass below.
                            u"replace": True,
                            u"data": list(),
                            u"mean": None,
                            u"stdev": None
                        }
                    _tpc_insert_data(
                        target=col_data[u"data"][tst_name_mod],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

        # Optionally replace data for tests also present in the replacement
        # data set; tests only in the original set are kept as-is.
        replacement = col.get(u"data-replacement", None)
        if replacement:
            rpl_data = input_data.filter_data(
                table,
                params=[
                    u"throughput",
                    u"result",
                    u"latency",
                    u"name",
                    u"parent",
                    u"tags"
                ],
                data=replacement,
                continue_on_error=True
            )
            for builds in rpl_data.values:
                for build in builds:
                    for tst_name, tst_data in build.items():
                        if tag and tag not in tst_data[u"tags"]:
                            continue
                        tst_name_mod = \
                            _tpc_modify_test_name(tst_name, ignore_nic=True).\
                            replace(u"2n1l-", u"")
                        if col_data[u"data"].get(tst_name_mod, None) is None:
                            name = tst_data[u'name'].rsplit(u'-', 1)[0]
                            if u"across testbeds" in table[u"title"].lower() \
                                    or u"across topologies" in \
                                    table[u"title"].lower():
                                name = _tpc_modify_displayed_test_name(name)
                            col_data[u"data"][tst_name_mod] = {
                                u"name": name,
                                u"replace": False,
                                u"data": list(),
                                u"mean": None,
                                u"stdev": None
                            }
                        if col_data[u"data"][tst_name_mod][u"replace"]:
                            # First replacement hit: drop the original data.
                            col_data[u"data"][tst_name_mod][u"replace"] = False
                            col_data[u"data"][tst_name_mod][u"data"] = list()
                        _tpc_insert_data(
                            target=col_data[u"data"][tst_name_mod],
                            src=tst_data,
                            include_tests=table[u"include-tests"]
                        )

        if table[u"include-tests"] in (u"NDR", u"PDR", u"hoststack", u"vsap") \
                or u"latency" in table[u"include-tests"]:
            for tst_name, tst_data in col_data[u"data"].items():
                if tst_data[u"data"]:
                    tst_data[u"mean"] = mean(tst_data[u"data"])
                    tst_data[u"stdev"] = stdev(tst_data[u"data"])

        cols.append(col_data)

    # Pivot: test name -> {column title -> {mean, stdev}}.
    tbl_dict = dict()
    for col in cols:
        for tst_name, tst_data in col[u"data"].items():
            if tbl_dict.get(tst_name, None) is None:
                tbl_dict[tst_name] = {
                    "name": tst_data[u"name"]
                }
            tbl_dict[tst_name][col[u"title"]] = {
                u"mean": tst_data[u"mean"],
                u"stdev": tst_data[u"stdev"]
            }

    if not tbl_dict:
        logging.warning(f"No data for table {table.get(u'title', u'')}!")
        return

    # One row per test: [name, col1-dict-or-None, col2-dict-or-None, ...].
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        row = [tst_data[u"name"], ]
        for col in cols:
            row.append(tst_data.get(col[u"title"], None))
        tbl_lst.append(row)

    comparisons = table.get(u"comparisons", None)
    rcas = list()
    if comparisons and isinstance(comparisons, list):
        # Collect valid comparisons into a new list. The previous approach
        # (comparisons.pop(idx) while enumerating the same list) skipped the
        # element following each removed one and left rcas misaligned with
        # the surviving comparisons.
        valid_comparisons = list()
        for idx, comp in enumerate(comparisons):
            try:
                col_ref = int(comp[u"reference"])
                col_cmp = int(comp[u"compare"])
            except KeyError:
                logging.warning(u"Comparison: No references defined! Skipping.")
                continue
            # NOTE(review): the original condition accepts col_ref == col_cmp
            # even when out of range; preserved as-is — confirm intent.
            if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
                    col_ref == col_cmp):
                logging.warning(f"Wrong values of reference={col_ref} "
                                f"and/or compare={col_cmp}. Skipping.")
                continue
            rca_file_name = comp.get(u"rca-file", None)
            if rca_file_name:
                try:
                    with open(rca_file_name, u"r") as file_handler:
                        rcas.append(
                            {
                                u"title": f"RCA{idx + 1}",
                                u"data": load(file_handler, Loader=FullLoader)
                            }
                        )
                except (YAMLError, IOError) as err:
                    logging.warning(
                        f"The RCA file {rca_file_name} does not exist or "
                        f"it is corrupted!"
                    )
                    logging.debug(repr(err))
                    rcas.append(None)
            else:
                rcas.append(None)
            valid_comparisons.append(comp)
        comparisons = valid_comparisons
    else:
        # Use an empty list (not None) so the loops below iterating over
        # comparisons do not raise TypeError.
        comparisons = list()

    # Compute comparison deltas; rows missing any needed value are dropped
    # (the for/else keeps a row only when no comparison broke out).
    tbl_cmp_lst = list()
    if comparisons:
        for row in tbl_lst:
            new_row = deepcopy(row)
            for comp in comparisons:
                ref_itm = row[int(comp[u"reference"])]
                if ref_itm is None and \
                        comp.get(u"reference-alt", None) is not None:
                    ref_itm = row[int(comp[u"reference-alt"])]
                cmp_itm = row[int(comp[u"compare"])]
                if ref_itm is not None and cmp_itm is not None and \
                        ref_itm[u"mean"] is not None and \
                        cmp_itm[u"mean"] is not None and \
                        ref_itm[u"stdev"] is not None and \
                        cmp_itm[u"stdev"] is not None:
                    try:
                        delta, d_stdev = relative_change_stdev(
                            ref_itm[u"mean"], cmp_itm[u"mean"],
                            ref_itm[u"stdev"], cmp_itm[u"stdev"]
                        )
                    except ZeroDivisionError:
                        break
                    if delta is None or math.isnan(delta):
                        break
                    # Scaled by 1e6 to match the column data; divided back
                    # when formatted below.
                    new_row.append({
                        u"mean": delta * 1e6,
                        u"stdev": d_stdev * 1e6
                    })
                else:
                    break
            else:
                tbl_cmp_lst.append(new_row)

    if not tbl_cmp_lst:
        # Nothing comparable (no valid comparisons or no complete rows);
        # proceeding would fail on tbl_cmp_lst[0] below.
        logging.warning(
            f"No comparable data for table {table.get(u'title', u'')}!"
        )
        return

    try:
        tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
        tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
    except TypeError as err:
        logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")

    # Expanded csv rows: each dict becomes a (mean, stdev) pair of cells.
    tbl_for_csv = list()
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or\
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                row.append(u"NT")
                row.append(u"NT")
            else:
                row.append(round(float(itm[u'mean']) / 1e6, 3))
                row.append(round(float(itm[u'stdev']) / 1e6, 3))
        for rca in rcas:
            if rca is None:
                continue
            rca_nr = rca[u"data"].get(row[0], u"-")
            row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        tbl_for_csv.append(row)

    header_csv = [u"Test Case", ]
    for col in cols:
        header_csv.append(f"Avg({col[u'title']})")
        header_csv.append(f"Stdev({col[u'title']})")
    for comp in comparisons:
        header_csv.append(
            f"Avg({comp.get(u'title', u'')})"
        )
        header_csv.append(
            f"Stdev({comp.get(u'title', u'')})"
        )
    for rca in rcas:
        if rca:
            header_csv.append(rca[u"title"])

    legend_lst = table.get(u"legend", None)
    if legend_lst is None:
        legend = u""
    else:
        legend = u"\n" + u"\n".join(legend_lst) + u"\n"

    footnote = u""
    if rcas and any(rcas):
        footnote += u"\nRoot Cause Analysis:\n"
        for rca in rcas:
            if rca:
                footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"

    csv_file_name = f"{table[u'output-file']}-csv.csv"
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(
            u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
        )
        for test in tbl_for_csv:
            file_handler.write(
                u",".join([f'"{item}"' for item in test]) + u"\n"
            )
        if legend_lst:
            for item in legend_lst:
                file_handler.write(f'"{item}"\n')
        if footnote:
            for itm in footnote.split(u"\n"):
                file_handler.write(f'"{itm}"\n')

    # Format cells as "mean ±stdev" strings (deltas get an explicit sign)
    # and track the widest "±stdev" part per column for later alignment.
    tbl_tmp = list()
    max_lens = [0, ] * len(tbl_cmp_lst[0])
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or \
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                new_itm = u"NT"
            else:
                if idx < len(cols):
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 2)} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
                        replace(u"nan", u"NaN")
                    )
                else:
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 2):+} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
                        replace(u"nan", u"NaN")
                    )
            if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
                max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
            row.append(new_itm)

        tbl_tmp.append(row)

    header = [u"Test Case", ]
    header.extend([col[u"title"] for col in cols])
    header.extend([comp.get(u"title", u"") for comp in comparisons])

    # Pad the stdev part to the column maximum and attach RCA tags to the
    # comparison (diff) columns.
    tbl_final = list()
    for line in tbl_tmp:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm in (u"NT", u"NaN"):
                row.append(itm)
                continue
            itm_lst = itm.rsplit(u"\u00B1", 1)
            itm_lst[-1] = \
                f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
            itm_str = u"\u00B1".join(itm_lst)

            if idx >= len(cols):
                # Diffs
                rca = rcas[idx - len(cols)]
                if rca:
                    # Add rcas to diffs
                    rca_nr = rca[u"data"].get(row[0], None)
                    if rca_nr:
                        hdr_len = len(header[idx + 1]) - 1
                        if hdr_len < 19:
                            hdr_len = 19
                        rca_nr = f"[{rca_nr}]"
                        itm_str = (
                            f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
                            f"{u' ' * (hdr_len - 4 - len(itm_str))}"
                            f"{itm_str}"
                        )
            row.append(itm_str)
        tbl_final.append(row)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(u";".join(header) + u"\n")
        for test in tbl_final:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    # Generate txt table:
    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")

    with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
        file_handler.write(legend)
        file_handler.write(footnote)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_final,
        table[u'output-file'],
        legend=legend,
        footnote=footnote,
        sort_data=False,
        title=table.get(u"title", u"")
    )
2097
2098
def table_weekly_comparison(table, in_data):
    """Generate the table(s) with algorithm: table_weekly_comparison
    specified in the specification file.

    Builds a table with one data column per build (up to
    "nr-of-data-columns"), a four-line header (VPP version, timestamp, CSIT
    build, testbed), optional relative-change comparison columns, and writes
    the result as csv, txt and html.

    :param table: Table to generate.
    :param in_data: Data to process.
    :type table: pandas.Series
    :type in_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    incl_tests = table.get(u"include-tests", None)
    if incl_tests not in (u"NDR", u"PDR"):
        logging.error(f"Wrong tests to include specified ({incl_tests}).")
        return

    nr_cols = table.get(u"nr-of-data-columns", None)
    if not nr_cols or nr_cols < 2:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    data = in_data.filter_data(
        table,
        params=[u"throughput", u"result", u"name", u"parent", u"tags"],
        continue_on_error=True
    )

    # Four header lines; per-build values are inserted at position 1 below,
    # so the earliest processed build ends up right-most.
    header = [
        [u"VPP Version", ],
        [u"Start Timestamp", ],
        [u"CSIT Build", ],
        [u"CSIT Testbed", ]
    ]
    tbl_dict = dict()
    idx = 0
    # Optional mapping of testbed IP -> human-readable testbed name.
    tb_tbl = table.get(u"testbeds", None)
    for job_name, job_data in data.items():
        for build_nr, build in job_data.items():
            # Stop after collecting the requested number of data columns.
            if idx >= nr_cols:
                break
            if build.empty:
                continue

            tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            else:
                testbed = u""
            header[2].insert(1, build_nr)
            header[3].insert(1, testbed)
            header[1].insert(
                1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
            )
            header[0].insert(
                1, in_data.metadata(job_name, build_nr).get(u"version", u"")
            )

            for tst_name, tst_data in build.items():
                tst_name_mod = \
                    _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
                if not tbl_dict.get(tst_name_mod, None):
                    tbl_dict[tst_name_mod] = dict(
                        name=tst_data[u'name'].rsplit(u'-', 1)[0],
                    )
                # Per-build results are keyed by negative column index
                # (-1 for the first processed build, -2 for the next, ...).
                try:
                    tbl_dict[tst_name_mod][-idx - 1] = \
                        tst_data[u"throughput"][incl_tests][u"LOWER"]
                except (TypeError, IndexError, KeyError, ValueError):
                    # Missing/partial result for this test in this build.
                    pass
            idx += 1

    if idx < nr_cols:
        logging.error(u"Not enough data to build the table! Skipping")
        return

    # Comparison columns: relative change between two data columns.
    # NOTE(review): "reference"/"compare" appear to be the negative column
    # keys used above (-1, -2, ...); the "- idx" offset compensates for the
    # Diff entries appended to header[0] in previous iterations — confirm.
    cmp_dict = dict()
    for idx, cmp in enumerate(table.get(u"comparisons", list())):
        idx_ref = cmp.get(u"reference", None)
        idx_cmp = cmp.get(u"compare", None)
        if idx_ref is None or idx_cmp is None:
            continue
        header[0].append(
            f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
            f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
        )
        header[1].append(u"")
        header[2].append(u"")
        header[3].append(u"")
        for tst_name, tst_data in tbl_dict.items():
            if not cmp_dict.get(tst_name, None):
                cmp_dict[tst_name] = list()
            ref_data = tst_data.get(idx_ref, None)
            cmp_data = tst_data.get(idx_cmp, None)
            if ref_data is None or cmp_data is None:
                # NaN marks a comparison that could not be computed.
                cmp_dict[tst_name].append(float(u'nan'))
            else:
                cmp_dict[tst_name].append(
                    relative_change(ref_data, cmp_data)
                )

    # Build the rows; insert(1, ...) reverses the negative-key order so the
    # columns line up with the header. Rows whose last comparison is missing
    # are collected separately and appended at the end.
    tbl_lst_none = list()
    tbl_lst = list()
    for tst_name, tst_data in tbl_dict.items():
        itm_lst = [tst_data[u"name"], ]
        for idx in range(nr_cols):
            item = tst_data.get(-idx - 1, None)
            if item is None:
                itm_lst.insert(1, None)
            else:
                # Scale to Mpps and round for display.
                itm_lst.insert(1, round(item / 1e6, 1))
        itm_lst.extend(
            [
                None if itm is None else round(itm, 1)
                for itm in cmp_dict[tst_name]
            ]
        )
        if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
            tbl_lst_none.append(itm_lst)
        else:
            tbl_lst.append(itm_lst)

    # Sort by name, then (stable sort) by the last comparison value.
    tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_lst.extend(tbl_lst_none)

    # Generate csv table:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        for hdr in header:
            file_handler.write(u",".join(hdr) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join(
                [
                    # Normalize all "missing" spellings to a dash.
                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
                    replace(u"null", u"-") for item in test
                ]
            ) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")

    # Reorganize header in txt table
    txt_table = list()
    with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
        for line in list(file_handler):
            txt_table.append(line)
    try:
        # Move the "CSIT Build" header line below the separator drawn by
        # the pretty-txt converter.
        txt_table.insert(5, txt_table.pop(2))
        with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
            file_handler.writelines(txt_table)
    except IndexError:
        pass

    # Generate html table:
    hdr_html = [
        u"<br>".join(row) for row in zip(*header)
    ]
    _tpc_generate_html_table(
        hdr_html,
        tbl_lst,
        table[u'output-file'],
        sort_data=True,
        title=table.get(u"title", u""),
        generate_rst=False
    )