# Report: Remove DNV testbeds
# [csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2023 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import math
21 import re
22
23 from collections import OrderedDict
24 from xml.etree import ElementTree as ET
25 from datetime import datetime as dt
26 from datetime import timedelta
27 from copy import deepcopy
28
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
31 import pandas as pd
32 import prettytable
33
34 from numpy import nan, isnan
35 from yaml import load, FullLoader, YAMLError
36
37 from pal_utils import mean, stdev, classify_anomalies, \
38     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
39
40
# Matches the NIC token in a test/suite name, e.g. "10ge2p1x710".
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')

# Target CPU frequency results are normalized to (see norm_factor usage).
NORM_FREQ = 2.0  # [GHz]
45
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name from the specification -> implementing
    # function defined in this module.
    generator = {
        "table_merged_details": table_merged_details,
        "table_soak_vs_ndr": table_soak_vs_ndr,
        "table_perf_trending_dash": table_perf_trending_dash,
        "table_perf_trending_dash_html": table_perf_trending_dash_html,
        "table_last_failed_tests": table_last_failed_tests,
        "table_failed_tests": table_failed_tests,
        "table_failed_tests_html": table_failed_tests_html,
        "table_oper_data_html": table_oper_data_html,
        "table_comparison": table_comparison,
        "table_weekly_comparison": table_weekly_comparison,
        "table_job_spec_duration": table_job_spec_duration
    }

    logging.info(u"Generating the tables ...")

    # Pre-compute per-testbed normalization factors scaling results to the
    # common NORM_FREQ CPU frequency.
    norm_factor = dict()
    for key, val in spec.environment.get("frequency", dict()).items():
        norm_factor[key] = NORM_FREQ / val

    for table in spec.tables:
        # An unknown algorithm would previously raise an uncaught KeyError
        # on the dict subscription; look it up explicitly instead.
        gen_fun = generator.get(table["algorithm"], None)
        if gen_fun is None:
            logging.error(
                f"Algorithm {table['algorithm']} is not implemented."
            )
            continue
        try:
            if table["algorithm"] == "table_weekly_comparison":
                table["testbeds"] = spec.environment.get("testbeds", None)
            if table["algorithm"] == "table_comparison":
                table["norm_factor"] = norm_factor
            gen_fun(table, data)
        except NameError as err:
            logging.error(
                f"Probably algorithm {table['algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info("Done.")
88
89
def table_job_spec_duration(table, input_data):
    """Generate the table(s) with algorithm: table_job_spec_duration
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    jb_type = table.get(u"jb-type", None)

    tbl_lst = list()
    if jb_type == u"iterative":
        for line in table.get(u"lines", tuple()):
            tbl_itm = {
                u"name": line.get(u"job-spec", u""),
                u"data": list()
            }
            for job, builds in line.get(u"data-set", dict()).items():
                for build_nr in builds:
                    try:
                        # "elapsedtime" is in milliseconds; convert to
                        # whole minutes.
                        minutes = input_data.metadata(
                            job, str(build_nr)
                        )[u"elapsedtime"] // 60000
                    except (KeyError, IndexError, ValueError, AttributeError):
                        continue
                    tbl_itm[u"data"].append(minutes)
            # Skip job specs with no usable builds; mean/stdev of an empty
            # series would otherwise break the HH:MM formatting below.
            if not tbl_itm[u"data"]:
                continue
            tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
            tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
            tbl_lst.append(tbl_itm)
    elif jb_type == u"coverage":
        job = table.get(u"data", None)
        if not job:
            return
        for line in table.get(u"lines", tuple()):
            try:
                # Coverage jobs have a single build, so stdev is undefined.
                tbl_itm = {
                    u"name": line.get(u"job-spec", u""),
                    u"mean": input_data.metadata(
                        list(job.keys())[0], str(line[u"build"])
                    )[u"elapsedtime"] // 60000,
                    u"stdev": float(u"nan")
                }
                tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
            except (KeyError, IndexError, ValueError, AttributeError):
                continue
            tbl_lst.append(tbl_itm)
    else:
        logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
        return

    # Nothing collected -> nothing to format or write.
    if not tbl_lst:
        return

    # Convert mean/stdev (minutes) to "HH:MM" strings; an undefined stdev
    # (nan) is rendered as an empty string.
    for line in tbl_lst:
        line[u"mean"] = \
            f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
        if math.isnan(line[u"stdev"]):
            line[u"stdev"] = u""
        else:
            line[u"stdev"] = \
                f"{int(line[u'stdev'] // 60):02d}:" \
                f"{int(line[u'stdev'] % 60):02d}"

    rows = list()
    for itm in tbl_lst:
        rows.append([
            itm[u"name"],
            f"{len(itm[u'data'])}",
            f"{itm[u'mean']} +- {itm[u'stdev']}"
            if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
        ])

    txt_table = prettytable.PrettyTable(
        [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
    )
    for row in rows:
        txt_table.add_row(row)
    txt_table.align = u"r"
    txt_table.align[u"Job Specification"] = u"l"

    file_name = f"{table.get(u'output-file', u'')}.txt"
    with open(file_name, u"wt", encoding='utf-8') as txt_file:
        txt_file.write(str(txt_table))
178
179
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    # Only the fields needed to render the operational data are requested.
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"telemetry-show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optional sorting of tests by index; direction given by the "sort" key.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    # Suite information is used to group tests into per-suite output files.
    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Header color, spacer-row color and two alternating body colors.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row carrying the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # A missing "telemetry-show-run", or one stored as a plain string
        # (presumably an error message), means no structured data to render.
        if tst_data.get(u"telemetry-show-run", None) is None or \
                isinstance(tst_data[u"telemetry-show-run"], str):
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            # Trailing spacer row with a white (invisible) dot keeps layout.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT present in the telemetry data.
        for dut_data in tst_data[u"telemetry-show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"runtime", None) is None:
                tcol.text = u"No Data"
                continue

            # Re-group flat telemetry items into
            # runtime[thread_id][graph_node][metric_name] = value.
            runtime = dict()
            for item in dut_data[u"runtime"].get(u"data", tuple()):
                tid = int(item[u"labels"][u"thread_id"])
                if runtime.get(tid, None) is None:
                    runtime[tid] = dict()
                gnode = item[u"labels"][u"graph_node"]
                if runtime[tid].get(gnode, None) is None:
                    runtime[tid][gnode] = dict()
                try:
                    runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
                except ValueError:
                    # Non-numeric metric values are kept as-is.
                    runtime[tid][gnode][item[u"name"]] = item[u"value"]

            # Per-thread rows: node name, calls, vectors, suspends, clocks
            # and average vector size (vectors/calls; 0.0 when no calls).
            threads = dict({idx: list() for idx in range(len(runtime))})
            for idx, run_data in runtime.items():
                for gnode, gdata in run_data.items():
                    threads[idx].append([
                        gnode,
                        int(gdata[u"calls"]),
                        int(gdata[u"vectors"]),
                        int(gdata[u"suspends"]),
                        float(gdata[u"clocks"]),
                        float(gdata[u"vectors"] / gdata[u"calls"]) \
                            if gdata[u"calls"] else 0.0
                    ])

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            # Thread 0 is labelled "main"; all others are workers.
            for thread_nr, thread in threads.items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                # Column header row; first column left-aligned, rest right.
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # Data rows with alternating background colors.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Trailing spacer row with a white (invisible) dot keeps layout.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Concatenate tables of all tests of each suite into one rst file.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            # NOTE(review): substring check — the test's parent name is
            # expected to be contained in the suite name; verify direction.
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
409
410
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of tests by index; direction given by the "sort" key.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; quotes are doubled for CSV escaping.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            # Only passed tests belonging to this suite are included.
            if data[test][u"status"] != u"PASS" or \
                    data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # The column "data" spec is e.g. "data name"; the second
                    # word is the key into the test data.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    # (the continue shortens the row, which the length check
                    # below uses to drop the whole row).
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Wrap long test names roughly in the middle, on a
                        # dash, using the rst |br| line-break substitution.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                # NDR is assumed to be the first line.
                                col_data = col_data.split(u"\n", 1)[1]
                            except IndexError:
                                pass
                        col_data = col_data.replace(u'\n', u' |br| ').\
                            replace(u'\r', u'').replace(u'"', u"'")
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
                        col_data = col_data.replace(u'\n', u' |br| ')
                        # NOTE(review): [:-5] drops the last 5 characters,
                        # presumably a trailing break marker — confirm.
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # Keep only complete rows (see "Test Failed" handling above).
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
507
508
509 def _tpc_modify_test_name(test_name, ignore_nic=False):
510     """Modify a test name by replacing its parts.
511
512     :param test_name: Test name to be modified.
513     :param ignore_nic: If True, NIC is removed from TC name.
514     :type test_name: str
515     :type ignore_nic: bool
516     :returns: Modified test name.
517     :rtype: str
518     """
519     test_name_mod = test_name.\
520         replace(u"-ndrpdr", u"").\
521         replace(u"1t1c", u"1c").\
522         replace(u"2t1c", u"1c"). \
523         replace(u"2t2c", u"2c").\
524         replace(u"4t2c", u"2c"). \
525         replace(u"4t4c", u"4c").\
526         replace(u"8t4c", u"4c")
527
528     if ignore_nic:
529         return re.sub(REGEX_NIC, u"", test_name_mod)
530     return test_name_mod
531
532
533 def _tpc_modify_displayed_test_name(test_name):
534     """Modify a test name which is displayed in a table by replacing its parts.
535
536     :param test_name: Test name to be modified.
537     :type test_name: str
538     :returns: Modified test name.
539     :rtype: str
540     """
541     return test_name.\
542         replace(u"1t1c", u"1c").\
543         replace(u"2t1c", u"1c"). \
544         replace(u"2t2c", u"2c").\
545         replace(u"4t2c", u"2c"). \
546         replace(u"4t4c", u"4c").\
547         replace(u"8t4c", u"4c")
548
549
550 def _tpc_insert_data(target, src, include_tests):
551     """Insert src data to the target structure.
552
553     :param target: Target structure where the data is placed.
554     :param src: Source data to be placed into the target structure.
555     :param include_tests: Which results will be included (MRR, NDR, PDR).
556     :type target: list
557     :type src: dict
558     :type include_tests: str
559     """
560     try:
561         if include_tests == u"MRR":
562             target[u"mean"] = src[u"result"][u"receive-rate"]
563             target[u"stdev"] = src[u"result"][u"receive-stdev"]
564         elif include_tests == u"PDR":
565             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
566         elif include_tests == u"NDR":
567             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
568         elif u"latency" in include_tests:
569             keys = include_tests.split(u"-")
570             if len(keys) == 4:
571                 lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
572                 target[u"data"].append(
573                     float(u"nan") if lat == -1 else lat * 1e6
574                 )
575         elif include_tests == u"hoststack":
576             try:
577                 target[u"data"].append(
578                     float(src[u"result"][u"bits_per_second"])
579                 )
580             except KeyError:
581                 target[u"data"].append(
582                     (float(src[u"result"][u"client"][u"tx_data"]) * 8) /
583                     ((float(src[u"result"][u"client"][u"time"]) +
584                       float(src[u"result"][u"server"][u"time"])) / 2)
585                 )
586         elif include_tests == u"vsap":
587             try:
588                 target[u"data"].append(src[u"result"][u"cps"])
589             except KeyError:
590                 target[u"data"].append(src[u"result"][u"rps"])
591     except (KeyError, TypeError):
592         pass
593
594
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    # Position of the "Test Case" column; it also selects the layout preset
    # below (alignments/widths for 2-, 3- and 4-column tables).
    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        # One pre-sorted DataFrame per column, ascending then descending;
        # the "Test Case" column always acts as the tie-breaker.
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    # Alternating row background colors.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        # Add one trace per sort order; a dropdown menu toggles which single
        # trace is visible.
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        # NOTE: idx is intentionally reused here; its layout-preset value is
        # no longer needed in this branch.
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    # Wrap the generated html in an rst file embedding it in an iframe.
    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )

        # Legend and footnote are rendered as a first paragraph followed by
        # a bullet list, one bullet per remaining line.
        if legend:
            try:
                itm_lst = legend[1:-2].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
        if footnote:
            try:
                itm_lst = footnote[1:].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")
781
782
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    The table compares the critical rate found by soak tests (the
    u"compare" data set) with the rate found by NDR/PDR/MRR tests (the
    u"reference" data set) and stores the result as csv, pretty-txt and
    html files.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test Case",
            f"Avg({table[u'reference'][u'title']})",
            f"Stdev({table[u'reference'][u'title']})",
            f"Avg({table[u'compare'][u'title']})",
            # Fix: the opening parenthesis after u"Stdev" was missing here,
            # producing a malformed column name (u"Stdev<title>)") that did
            # not match the spelling used in the legend below.
            f"Stdev({table[u'compare'][u'title']})",
            u"Diff",
            u"Stdev(Diff)"
        ]
        header_str = u";".join(header) + u"\n"
        legend = (
            u"\nLegend:\n"
            f"Avg({table[u'reference'][u'title']}): "
            f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
            f"from a series of runs of the listed tests.\n"
            f"Stdev({table[u'reference'][u'title']}): "
            f"Standard deviation value of {table[u'reference'][u'title']} "
            f"[Mpps] computed from a series of runs of the listed tests.\n"
            f"Avg({table[u'compare'][u'title']}): "
            f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
            f"a series of runs of the listed tests.\n"
            f"Stdev({table[u'compare'][u'title']}): "
            f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
            f"computed from a series of runs of the listed tests.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Stdev(Diff): "
            u"Standard deviation of percentage change calculated for mean "
            u"values."
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        # Runs without throughput data are silently skipped.
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        # MRR results carry (mean, stdev) pairs.
                        result = (tst_data[u"result"][u"receive-rate"],
                                  tst_data[u"result"][u"receive-stdev"])
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            # NOTE(review): reference values are rounded to 1 decimal while
            # compare values below use 2 — looks inconsistent; confirm
            # whether this is intentional before changing.
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 2))
            item.append(round(data_c_stdev / 1e6, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # Rows with data missing on either side are dropped entirely.
        if data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            try:
                item.append(round(delta, 2))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev, 2))
            except ValueError:
                item.append(d_stdev)
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    with open(csv_file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(
        csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
    )
    with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
        file_handler.write(legend)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_lst,
        table[u'output-file'],
        legend=legend,
        title=table.get(u"title", u"")
    )
961
962
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    Produces the csv (and pretty-txt) trending dashboard: per test, the
    latest trend value, the number of runs the trend value is based on,
    the long-term change and the numbers of regressions / progressions
    detected in the evaluated window.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Runs [#]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to classify a trend.
        if len(data_t) < 2:
            continue

        try:
            classification_lst, avgs, _ = classify_anomalies(data_t)
        except ValueError as err:
            logging.info(f"{err} Skipping")
            # NOTE(review): this aborts the whole table, not just this
            # test; confirm whether `continue` was intended.
            return

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Number of trailing samples sharing the latest trend value.
        # (Fix: stray trailing semicolon removed.)
        nr_of_last_avgs = 0
        for x in reversed(avgs):
            if x == last_avg:
                nr_of_last_avgs += 1
            else:
                break

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            # A single guard suffices: rel_change_last is nan whenever
            # last_avg is nan (see above), so the original two partially
            # overlapping isnan checks collapse to this one.
            if isnan(rel_change_last) or isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 nr_of_last_avgs,
                 rel_change_long,
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

    # Stable multi-key sort: name, runs, long-term change, then the
    # regression / progression counters (descending) dominate.
    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[2])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[5], reverse=True)
    tbl_lst.sort(key=lambda rel: rel[4], reverse=True)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1098
1099
1100 def _generate_url(testbed, test_name):
1101     """Generate URL to a trending plot from the name of the test case.
1102
1103     :param testbed: The testbed used for testing.
1104     :param test_name: The name of the test case.
1105     :type testbed: str
1106     :type test_name: str
1107     :returns: The URL to the plot with the trending data for the given test
1108         case.
1109     :rtype str
1110     """
1111
1112     if u"x520" in test_name:
1113         nic = u"x520"
1114     elif u"x710" in test_name:
1115         nic = u"x710"
1116     elif u"xl710" in test_name:
1117         nic = u"xl710"
1118     elif u"xxv710" in test_name:
1119         nic = u"xxv710"
1120     elif u"vic1227" in test_name:
1121         nic = u"vic1227"
1122     elif u"vic1385" in test_name:
1123         nic = u"vic1385"
1124     elif u"x553" in test_name:
1125         nic = u"x553"
1126     elif u"cx556" in test_name or u"cx556a" in test_name:
1127         nic = u"cx556a"
1128     elif u"ena" in test_name:
1129         nic = u"nitro50g"
1130     else:
1131         nic = u""
1132
1133     if u"64b" in test_name:
1134         frame_size = u"64b"
1135     elif u"78b" in test_name:
1136         frame_size = u"78b"
1137     elif u"imix" in test_name:
1138         frame_size = u"imix"
1139     elif u"9000b" in test_name:
1140         frame_size = u"9000b"
1141     elif u"1518b" in test_name:
1142         frame_size = u"1518b"
1143     elif u"114b" in test_name:
1144         frame_size = u"114b"
1145     else:
1146         frame_size = u""
1147
1148     if u"1t1c" in test_name or \
1149         (u"-1c-" in test_name and testbed in (u"3n-tsh", u"2n-tx2")):
1150         cores = u"1t1c"
1151     elif u"2t2c" in test_name or \
1152          (u"-2c-" in test_name and testbed in (u"3n-tsh", u"2n-tx2")):
1153         cores = u"2t2c"
1154     elif u"4t4c" in test_name or \
1155          (u"-4c-" in test_name and testbed in (u"3n-tsh", u"2n-tx2")):
1156         cores = u"4t4c"
1157     elif u"2t1c" in test_name or \
1158          (u"-1c-" in test_name and
1159           testbed in (u"2n-icx", u"3n-icx", u"2n-clx", u"2n-zn2", u"2n-aws")):
1160         cores = u"2t1c"
1161     elif u"4t2c" in test_name or \
1162          (u"-2c-" in test_name and
1163           testbed in (u"2n-icx", u"3n-icx", u"2n-clx", u"2n-zn2", u"2n-aws")):
1164         cores = u"4t2c"
1165     elif u"8t4c" in test_name or \
1166          (u"-4c-" in test_name and
1167           testbed in (u"2n-icx", u"3n-icx", u"2n-clx", u"2n-zn2", u"2n-aws")):
1168         cores = u"8t4c"
1169     else:
1170         cores = u""
1171
1172     if u"testpmd" in test_name:
1173         driver = u"testpmd"
1174     elif u"l3fwd" in test_name:
1175         driver = u"l3fwd"
1176     elif u"avf" in test_name:
1177         driver = u"avf"
1178     elif u"af-xdp" in test_name or u"af_xdp" in test_name:
1179         driver = u"af_xdp"
1180     elif u"rdma" in test_name:
1181         driver = u"rdma"
1182     elif u"tsh" in testbed:
1183         driver = u"ixgbe"
1184     elif u"ena" in test_name:
1185         driver = u"ena"
1186     else:
1187         driver = u"dpdk"
1188
1189     if u"macip-iacl1s" in test_name:
1190         bsf = u"features-macip-iacl1"
1191     elif u"macip-iacl10s" in test_name:
1192         bsf = u"features-macip-iacl10"
1193     elif u"macip-iacl50s" in test_name:
1194         bsf = u"features-macip-iacl50"
1195     elif u"iacl1s" in test_name:
1196         bsf = u"features-iacl1"
1197     elif u"iacl10s" in test_name:
1198         bsf = u"features-iacl10"
1199     elif u"iacl50s" in test_name:
1200         bsf = u"features-iacl50"
1201     elif u"oacl1s" in test_name:
1202         bsf = u"features-oacl1"
1203     elif u"oacl10s" in test_name:
1204         bsf = u"features-oacl10"
1205     elif u"oacl50s" in test_name:
1206         bsf = u"features-oacl50"
1207     elif u"nat44det" in test_name:
1208         bsf = u"nat44det-bidir"
1209     elif u"nat44ed" in test_name and u"udir" in test_name:
1210         bsf = u"nat44ed-udir"
1211     elif u"-cps" in test_name and u"ethip4udp" in test_name:
1212         bsf = u"udp-cps"
1213     elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1214         bsf = u"tcp-cps"
1215     elif u"-pps" in test_name and u"ethip4udp" in test_name:
1216         bsf = u"udp-pps"
1217     elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1218         bsf = u"tcp-pps"
1219     elif u"-tput" in test_name and u"ethip4udp" in test_name:
1220         bsf = u"udp-tput"
1221     elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1222         bsf = u"tcp-tput"
1223     elif u"udpsrcscale" in test_name:
1224         bsf = u"features-udp"
1225     elif u"iacl" in test_name:
1226         bsf = u"features"
1227     elif u"policer" in test_name:
1228         bsf = u"features"
1229     elif u"adl" in test_name:
1230         bsf = u"features"
1231     elif u"cop" in test_name:
1232         bsf = u"features"
1233     elif u"nat" in test_name:
1234         bsf = u"features"
1235     elif u"macip" in test_name:
1236         bsf = u"features"
1237     elif u"scale" in test_name:
1238         bsf = u"scale"
1239     elif u"base" in test_name:
1240         bsf = u"base"
1241     else:
1242         bsf = u"base"
1243
1244     if u"114b" in test_name and u"vhost" in test_name:
1245         domain = u"vts"
1246     elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1247         domain = u"nat44"
1248         if u"nat44det" in test_name:
1249             domain += u"-det-bidir"
1250         else:
1251             domain += u"-ed"
1252         if u"udir" in test_name:
1253             domain += u"-unidir"
1254         elif u"-ethip4udp-" in test_name:
1255             domain += u"-udp"
1256         elif u"-ethip4tcp-" in test_name:
1257             domain += u"-tcp"
1258         if u"-cps" in test_name:
1259             domain += u"-cps"
1260         elif u"-pps" in test_name:
1261             domain += u"-pps"
1262         elif u"-tput" in test_name:
1263             domain += u"-tput"
1264     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1265         domain = u"dpdk"
1266     elif u"memif" in test_name:
1267         domain = u"container_memif"
1268     elif u"srv6" in test_name:
1269         domain = u"srv6"
1270     elif u"vhost" in test_name:
1271         domain = u"vhost"
1272         if u"vppl2xc" in test_name:
1273             driver += u"-vpp"
1274         else:
1275             driver += u"-testpmd"
1276         if u"lbvpplacp" in test_name:
1277             bsf += u"-link-bonding"
1278     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1279         domain = u"nf_service_density_vnfc"
1280     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1281         domain = u"nf_service_density_cnfc"
1282     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1283         domain = u"nf_service_density_cnfp"
1284     elif u"ipsec" in test_name:
1285         domain = u"ipsec"
1286         if u"sw" in test_name:
1287             bsf += u"-sw"
1288         elif u"hw" in test_name:
1289             bsf += u"-hw"
1290         elif u"spe" in test_name:
1291             bsf += u"-spe"
1292     elif u"ethip4vxlan" in test_name:
1293         domain = u"ip4_tunnels"
1294     elif u"ethip4udpgeneve" in test_name:
1295         domain = u"ip4_tunnels"
1296     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1297         domain = u"ip4"
1298     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1299         domain = u"ip6"
1300     elif u"l2xcbase" in test_name or \
1301             u"l2xcscale" in test_name or \
1302             u"l2bdbasemaclrn" in test_name or \
1303             u"l2bdscale" in test_name or \
1304             u"l2patch" in test_name:
1305         domain = u"l2"
1306     else:
1307         domain = u""
1308
1309     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1310     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1311
1312     return file_name + anchor_name
1313
1314
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the csv dashboard produced by table_perf_trending_dash and
    renders it as an html table wrapped in an rST u".. raw:: html"
    directive. Rows are colored according to the regression / progression
    counters and the test names may be turned into links to the trending
    plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data  # Unused; the table is generated from the input file.

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # NDR/PDR dashboards link to the ndrpdr trending plots, MRR to the
    # default trending plots.
    if test_type in (u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = f"-{test_type.lower()}"
    else:
        lnk_dir = u"../trending/"
        lnk_sufix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except FileNotFoundError as err:
        logging.warning(f"{err}")
        return
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header (the first csv row):
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: two alternating shades per classification.
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 hold the regression / progression counters.
        if int(row[4]):
            color = u"regression"
        elif int(row[5]):
            color = u"progression"
        else:
            color = u"normal"
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # First column (test name), optionally rendered as a link to
            # the trending plot of the test:
            if c_idx == 0 and table.get(u"add-links", True):
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), item)}"
                        f"{lnk_sufix}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            # encoding=u"unicode" makes tostring() return str already;
            # the redundant str() wrapper was removed.
            html_file.write(ET.tostring(dashboard, encoding=u"unicode"))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1434
1435
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each build, writes the build number, VPP version, pass/fail
    counts, duration and one line per failed test (NIC-prefixed name and
    sanitized failure message) to a plain text file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    out_lines = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
                duration = \
                    input_data.metadata(job, build).get(u"elapsedtime", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            passed = 0
            failed = 0
            failures = list()
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                else:
                    failed += 1
                    found = re.search(REGEX_NIC, tst_data[u"parent"])
                    # Failures whose parent lacks a NIC tag are counted
                    # but not listed.
                    if not found:
                        continue
                    msg = tst_data[u'msg'].replace(u"\n", u"")
                    # Anonymize IPv4 addresses in the failure message.
                    msg = re.sub(
                        r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
                        'xxx.xxx.xxx.xxx', msg
                    )
                    msg = msg.split(u'Also teardown failed')[0]
                    failures.append(
                        f"{found.group(0)}-{tst_data[u'name']}###{msg}"
                    )
            out_lines.extend([build, version, passed, failed, duration])
            out_lines.extend(failures)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(f"{item}\n" for item in out_lines)
1503
1504
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Collects, per test case, the number of failures within the configured
    time window together with the time, VPP build and CSIT job build of
    the most recent failure, and writes the result as csv and pretty-txt.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    test_type = u"NDRPDR" if u"NDRPDR" in table.get(u"filter", list()) \
        else u"MRR"

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Only results generated within the time window (in days) count.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    found = re.search(REGEX_NIC, tst_data[u"parent"])
                    # Tests without a NIC tag in the parent are skipped.
                    if not found:
                        continue
                    tbl_dict[tst_name] = {
                        u"name": f"{found.group(0)}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        # Keep only the failing runs; the tuple layout is
        # (status, generated, version, build).
        fail_rows = [
            val for val in tst_data[u"data"].values() if val[0] == u"FAIL"
        ]
        if not fail_rows:
            continue
        fails_nr = len(fail_rows)
        _, last_date, last_vpp, last_csit = fail_rows[-1]
        max_fails = max(max_fails, fails_nr)
        job_kind = u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'
        tbl_lst.append([
            tst_data[u"name"],
            fails_nr,
            last_date,
            last_vpp,
            f"{job_kind}-build-{last_csit}"
        ])

    # Newest failures first, then grouped by descending failure count
    # (the date sort is stable within each group).
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_sorted.extend(item for item in tbl_lst if item[1] == nrf)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join(str(item) for item in test) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1615
1616
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV produced by the corresponding failed-tests table and
    renders it as an HTML table embedded in an rST ``raw`` directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data  # Unused; kept to match the generator call signature.

    testbed = table.get(u"testbed", None)
    if not testbed:
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
        logging.error(
            f"Test type {test_type} is not defined. "
            f"Skipping."
        )
        return

    # NDR/PDR trending lives in a separate directory and links straight
    # to the PDR graph; MRR uses the plain trending directory.
    if test_type == u"MRR":
        lnk_dir = u"../trending/"
        lnk_sufix = u""
    else:
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = u"-pdr"

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The resulting HTML table element:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row (first CSV line):
    hdr_row = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, caption in enumerate(rows[0]):
        hdr_cell = ET.SubElement(
            hdr_row,
            u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        hdr_cell.text = caption

    # Data rows, with alternating background colors:
    colors = (u"#e9f1fb", u"#d4e4f7")
    add_links = table.get(u"add-links", True)
    for row_nr, row in enumerate(rows[1:]):
        data_row = ET.SubElement(
            failed_tests,
            u"tr",
            attrib=dict(bgcolor=colors[row_nr % 2])
        )
        for col_nr, item in enumerate(row):
            cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0 and add_links:
                # First column is the test name; render it as a link to
                # its trending graph.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(testbed, item)}"
                        f"{lnk_sufix}"
                    )
                )
                link.text = item
            else:
                cell.text = item

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests, encoding=u"unicode"))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1714
1715
def table_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_comparison
    specified in the specification file.

    Builds a per-test comparison of mean/stdev across the configured data
    columns, computes relative differences for the configured comparisons
    (optionally normalized and annotated with RCA references), and writes
    the result as csv, txt and html files.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f"  Generating the table {table.get('title', '')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get('type', '')} "
        f"{table.get('title', '')}."
    )

    columns = table.get("columns", None)
    if not columns:
        logging.error(
            f"No columns specified for {table.get('title', '')}. Skipping."
        )
        return

    # Collect per-column data: each entry maps a normalized test name to
    # its samples and (later) mean/stdev.
    cols = list()
    for idx, col in enumerate(columns):
        if col.get("data-set", None) is None:
            logging.warning(f"No data for column {col.get('title', '')}")
            continue
        tag = col.get("tag", None)
        data = input_data.filter_data(
            table,
            params=[
                "throughput",
                "result",
                "latency",
                "name",
                "parent",
                "tags"
            ],
            data=col["data-set"],
            continue_on_error=True
        )
        col_data = {
            "title": col.get("title", f"Column{idx}"),
            "data": dict()
        }
        for builds in data.values:
            for build in builds:
                for tst_name, tst_data in build.items():
                    # Optional tag filter restricts which tests feed the
                    # column.
                    if tag and tag not in tst_data["tags"]:
                        continue
                    tst_name_mod = \
                        _tpc_modify_test_name(tst_name, ignore_nic=True).\
                        replace("2n1l-", "")
                    if col_data["data"].get(tst_name_mod, None) is None:
                        name = tst_data['name'].rsplit('-', 1)[0]
                        if "across testbeds" in table["title"].lower() or \
                                "across topologies" in table["title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        # "replace": True marks data that may be thrown
                        # away if a data-replacement set provides the same
                        # test below.
                        col_data["data"][tst_name_mod] = {
                            "name": name,
                            "replace": True,
                            "data": list(),
                            "mean": None,
                            "stdev": None
                        }
                    _tpc_insert_data(
                        target=col_data["data"][tst_name_mod],
                        src=tst_data,
                        include_tests=table["include-tests"]
                    )

        # Optional replacement data set: tests present here override the
        # samples gathered from the primary data set (first hit clears the
        # previously collected samples).
        replacement = col.get("data-replacement", None)
        if replacement:
            rpl_data = input_data.filter_data(
                table,
                params=[
                    "throughput",
                    "result",
                    "latency",
                    "name",
                    "parent",
                    "tags"
                ],
                data=replacement,
                continue_on_error=True
            )
            for builds in rpl_data.values:
                for build in builds:
                    for tst_name, tst_data in build.items():
                        if tag and tag not in tst_data["tags"]:
                            continue
                        tst_name_mod = \
                            _tpc_modify_test_name(tst_name, ignore_nic=True).\
                            replace("2n1l-", "")
                        if col_data["data"].get(tst_name_mod, None) is None:
                            name = tst_data['name'].rsplit('-', 1)[0]
                            if "across testbeds" in table["title"].lower() \
                                    or "across topologies" in \
                                    table["title"].lower():
                                name = _tpc_modify_displayed_test_name(name)
                            col_data["data"][tst_name_mod] = {
                                "name": name,
                                "replace": False,
                                "data": list(),
                                "mean": None,
                                "stdev": None
                            }
                        if col_data["data"][tst_name_mod]["replace"]:
                            # First replacement sample for this test:
                            # discard the primary data.
                            col_data["data"][tst_name_mod]["replace"] = False
                            col_data["data"][tst_name_mod]["data"] = list()
                        _tpc_insert_data(
                            target=col_data["data"][tst_name_mod],
                            src=tst_data,
                            include_tests=table["include-tests"]
                        )

        # For these test kinds the statistics are computed here; other
        # kinds presumably get mean/stdev filled in by _tpc_insert_data —
        # TODO confirm against that helper.
        if table["include-tests"] in ("NDR", "PDR", "hoststack", "vsap") \
                or "latency" in table["include-tests"]:
            for tst_name, tst_data in col_data["data"].items():
                if tst_data["data"]:
                    tst_data["mean"] = mean(tst_data["data"])
                    tst_data["stdev"] = stdev(tst_data["data"])

        cols.append(col_data)

    # Pivot: test name -> {display name, per-column {mean, stdev}}.
    tbl_dict = dict()
    for col in cols:
        for tst_name, tst_data in col["data"].items():
            if tbl_dict.get(tst_name, None) is None:
                tbl_dict[tst_name] = {
                    "name": tst_data["name"]
                }
            tbl_dict[tst_name][col["title"]] = {
                "mean": tst_data["mean"],
                "stdev": tst_data["stdev"]
            }

    if not tbl_dict:
        logging.warning(f"No data for table {table.get('title', '')}!")
        return

    # One row per test: [name, col1 stats or None, col2 stats or None, ...]
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        row = [tst_data[u"name"], ]
        for col in cols:
            row.append(tst_data.get(col[u"title"], None))
        tbl_lst.append(row)

    # Validate comparisons and load the optional RCA yaml files; rcas is
    # kept index-aligned with comparisons (None where no/invalid file).
    comparisons = table.get("comparisons", None)
    rcas = list()
    if comparisons and isinstance(comparisons, list):
        for idx, comp in enumerate(comparisons):
            try:
                col_ref = int(comp["reference"])
                col_cmp = int(comp["compare"])
            except KeyError:
                logging.warning("Comparison: No references defined! Skipping.")
                # NOTE(review): popping from the list being enumerated
                # skips the following element and shifts later indices,
                # which can desynchronize rcas from comparisons — verify
                # whether specs ever contain multiple invalid comparisons.
                comparisons.pop(idx)
                continue
            if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
                    col_ref == col_cmp):
                logging.warning(f"Wrong values of reference={col_ref} "
                                f"and/or compare={col_cmp}. Skipping.")
                comparisons.pop(idx)
                continue
            rca_file_name = comp.get("rca-file", None)
            if rca_file_name:
                try:
                    with open(rca_file_name, "r") as file_handler:
                        rcas.append(
                            {
                                "title": f"RCA{idx + 1}",
                                "data": load(file_handler, Loader=FullLoader)
                            }
                        )
                except (YAMLError, IOError) as err:
                    logging.warning(
                        f"The RCA file {rca_file_name} does not exist or "
                        f"it is corrupted!"
                    )
                    logging.debug(repr(err))
                    rcas.append(None)
            else:
                rcas.append(None)
    else:
        comparisons = None

    # Compute the relative differences. A row enters tbl_cmp_lst only if
    # every comparison could be computed (the for/else: `break` on any
    # missing value or math failure drops the whole row).
    tbl_cmp_lst = list()
    if comparisons:
        for row in tbl_lst:
            new_row = deepcopy(row)
            for comp in comparisons:
                ref_itm = row[int(comp["reference"])]
                if ref_itm is None and \
                        comp.get("reference-alt", None) is not None:
                    ref_itm = row[int(comp["reference-alt"])]
                cmp_itm = row[int(comp[u"compare"])]
                if ref_itm is not None and cmp_itm is not None and \
                        ref_itm["mean"] is not None and \
                        cmp_itm["mean"] is not None and \
                        ref_itm["stdev"] is not None and \
                        cmp_itm["stdev"] is not None:
                    # Optional normalization factors keyed by the
                    # comparison's norm-ref/norm-cmp names; default 1.0.
                    norm_factor_ref = table["norm_factor"].get(
                        comp.get("norm-ref", ""),
                        1.0
                    )
                    norm_factor_cmp = table["norm_factor"].get(
                        comp.get("norm-cmp", ""),
                        1.0
                    )
                    try:
                        delta, d_stdev = relative_change_stdev(
                            ref_itm["mean"] * norm_factor_ref,
                            cmp_itm["mean"] * norm_factor_cmp,
                            ref_itm["stdev"] * norm_factor_ref,
                            cmp_itm["stdev"] * norm_factor_cmp
                        )
                    except ZeroDivisionError:
                        break
                    if delta is None or math.isnan(delta):
                        break
                    # Scaled by 1e6 so later output can divide back; keeps
                    # the same unit handling as the column stats.
                    new_row.append({
                        "mean": delta * 1e6,
                        "stdev": d_stdev * 1e6
                    })
                else:
                    break
            else:
                tbl_cmp_lst.append(new_row)

    # Sort by name, then (stable) by the last comparison's mean descending.
    try:
        tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
        tbl_cmp_lst.sort(key=lambda rel: rel[-1]['mean'], reverse=True)
    except TypeError as err:
        logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")

    # Flatten rows for the "-csv.csv" output: name, then Avg/Stdev pairs
    # ("NT" = not tested), then RCA references.
    tbl_for_csv = list()
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or\
                    itm.get('mean', None) is None or \
                    itm.get('stdev', None) is None:
                row.append("NT")
                row.append("NT")
            else:
                row.append(round(float(itm['mean']) / 1e6, 3))
                row.append(round(float(itm['stdev']) / 1e6, 3))
        for rca in rcas:
            if rca is None:
                continue
            rca_nr = rca["data"].get(row[0], "-")
            row.append(f"[{rca_nr}]" if rca_nr != "-" else "-")
        tbl_for_csv.append(row)

    header_csv = ["Test Case", ]
    for col in cols:
        header_csv.append(f"Avg({col['title']})")
        header_csv.append(f"Stdev({col['title']})")
    # NOTE(review): comparisons can be None here (set above when the spec
    # has none), which would raise TypeError on iteration — presumably all
    # specs using this algorithm define comparisons; verify.
    for comp in comparisons:
        header_csv.append(
            f"Avg({comp.get('title', '')})"
        )
        header_csv.append(
            f"Stdev({comp.get('title', '')})"
        )
    for rca in rcas:
        if rca:
            header_csv.append(rca["title"])

    legend_lst = table.get("legend", None)
    if legend_lst is None:
        legend = ""
    else:
        legend = "\n" + "\n".join(legend_lst) + "\n"

    footnote = ""
    if rcas and any(rcas):
        footnote += "\nRoot Cause Analysis:\n"
        for rca in rcas:
            if rca:
                footnote += f"{rca['data'].get('footnote', '')}\n"

    csv_file_name = f"{table['output-file']}-csv.csv"
    with open(csv_file_name, "wt", encoding='utf-8') as file_handler:
        file_handler.write(
            ",".join([f'"{itm}"' for itm in header_csv]) + "\n"
        )
        for test in tbl_for_csv:
            file_handler.write(
                ",".join([f'"{item}"' for item in test]) + "\n"
            )
        if legend_lst:
            for item in legend_lst:
                file_handler.write(f'"{item}"\n')
        if footnote:
            for itm in footnote.split("\n"):
                file_handler.write(f'"{itm}"\n')

    # Format "mean ±stdev" strings and track the widest stdev part per
    # column for later right-alignment.
    tbl_tmp = list()
    # NOTE(review): IndexError if tbl_cmp_lst is empty (no comparisons or
    # all rows dropped) — confirm callers guarantee non-empty data here.
    max_lens = [0, ] * len(tbl_cmp_lst[0])
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or \
                    itm.get('mean', None) is None or \
                    itm.get('stdev', None) is None:
                new_itm = "NT"
            else:
                if idx < len(cols):
                    # Data column: plain value.
                    new_itm = (
                        f"{round(float(itm['mean']) / 1e6, 2)} "
                        f"\u00B1{round(float(itm['stdev']) / 1e6, 2)}".
                        replace("nan", "NaN")
                    )
                else:
                    # Comparison column: explicit sign on the delta.
                    new_itm = (
                        f"{round(float(itm['mean']) / 1e6, 2):+} "
                        f"\u00B1{round(float(itm['stdev']) / 1e6, 2)}".
                        replace("nan", "NaN")
                    )
            if len(new_itm.rsplit(" ", 1)[-1]) > max_lens[idx]:
                max_lens[idx] = len(new_itm.rsplit(" ", 1)[-1])
            row.append(new_itm)

        tbl_tmp.append(row)

    header = ["Test Case", ]
    header.extend([col["title"] for col in cols])
    header.extend([comp.get("title", "") for comp in comparisons])

    # Pad the ±stdev part to the column-wide width and prefix comparison
    # cells with their RCA reference, aligned under the header.
    tbl_final = list()
    for line in tbl_tmp:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm in ("NT", "NaN"):
                row.append(itm)
                continue
            itm_lst = itm.rsplit("\u00B1", 1)
            itm_lst[-1] = \
                f"{' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
            itm_str = "\u00B1".join(itm_lst)

            if idx >= len(cols):
                # Diffs
                rca = rcas[idx - len(cols)]
                if rca:
                    # Add rcas to diffs
                    rca_nr = rca["data"].get(row[0], None)
                    if rca_nr:
                        # Reserve at least 19 chars so the RCA tag and the
                        # value line up under the header text.
                        hdr_len = len(header[idx + 1]) - 1
                        if hdr_len < 19:
                            hdr_len = 19
                        rca_nr = f"[{rca_nr}]"
                        itm_str = (
                            f"{' ' * (4 - len(rca_nr))}{rca_nr}"
                            f"{' ' * (hdr_len - 4 - len(itm_str))}"
                            f"{itm_str}"
                        )
            row.append(itm_str)
        tbl_final.append(row)

    # Generate csv tables:
    csv_file_name = f"{table['output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, "wt", encoding='utf-8') as file_handler:
        file_handler.write(";".join(header) + "\n")
        for test in tbl_final:
            file_handler.write(";".join([str(item) for item in test]) + "\n")

    # Generate txt table:
    txt_file_name = f"{table['output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=";")

    with open(txt_file_name, 'a', encoding='utf-8') as file_handler:
        file_handler.write(legend)
        file_handler.write(footnote)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_final,
        table['output-file'],
        legend=legend,
        footnote=footnote,
        sort_data=False,
        title=table.get("title", "")
    )
2107
2108
def table_weekly_comparison(table, in_data):
    """Generate the table(s) with algorithm: table_weekly_comparison
    specified in the specification file.

    Collects NDR or PDR lower-bound throughput from the newest builds
    (up to ``nr-of-data-columns``), computes relative changes between the
    configured build pairs, and writes csv, txt and html outputs.

    :param table: Table to generate.
    :param in_data: Data to process.
    :type table: pandas.Series
    :type in_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    incl_tests = table.get(u"include-tests", None)
    if incl_tests not in (u"NDR", u"PDR"):
        logging.error(f"Wrong tests to include specified ({incl_tests}).")
        return

    nr_cols = table.get(u"nr-of-data-columns", None)
    if not nr_cols or nr_cols < 2:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    data = in_data.filter_data(
        table,
        params=[u"throughput", u"result", u"name", u"parent", u"tags"],
        continue_on_error=True
    )

    # Four header rows; per-build values are inserted at position 1 so the
    # newest processed build ends up right after the row caption.
    header = [
        [u"VPP Version", ],
        [u"Start Timestamp", ],
        [u"CSIT Build", ],
        [u"CSIT Testbed", ]
    ]
    tbl_dict = dict()
    idx = 0  # Number of data columns (builds) collected so far.
    tb_tbl = table.get(u"testbeds", None)  # testbed-IP -> display name map.
    for job_name, job_data in data.items():
        for build_nr, build in job_data.items():
            # NOTE(review): this break only leaves the inner (builds)
            # loop; the outer jobs loop keeps iterating — presumably fine
            # because idx stays >= nr_cols, but worth confirming.
            if idx >= nr_cols:
                break
            if build.empty:
                continue

            tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            else:
                testbed = u""
            header[2].insert(1, build_nr)
            header[3].insert(1, testbed)
            header[1].insert(
                1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
            )
            logging.info(
                in_data.metadata(job_name, build_nr).get(u"version", u"ERROR"))
            header[0].insert(
                1, in_data.metadata(job_name, build_nr).get("version", build_nr)
            )

            for tst_name, tst_data in build.items():
                tst_name_mod = \
                    _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
                if not tbl_dict.get(tst_name_mod, None):
                    tbl_dict[tst_name_mod] = dict(
                        name=tst_data[u'name'].rsplit(u'-', 1)[0],
                    )
                try:
                    # Per-build throughput is stored under negative integer
                    # keys (-1 for the first processed build, -2 for the
                    # next, ...), mirroring the header insert order.
                    tbl_dict[tst_name_mod][-idx - 1] = \
                        tst_data[u"throughput"][incl_tests][u"LOWER"]
                except (TypeError, IndexError, KeyError, ValueError):
                    # Best effort: tests without the requested result are
                    # simply left without a value for this build.
                    pass
            idx += 1

    if idx < nr_cols:
        logging.error(u"Not enough data to build the table! Skipping")
        return

    # Relative change for each configured (reference, compare) build pair;
    # NaN marks pairs where either side is missing.
    cmp_dict = dict()
    for idx, cmp in enumerate(table.get(u"comparisons", list())):
        idx_ref = cmp.get(u"reference", None)
        idx_cmp = cmp.get(u"compare", None)
        if idx_ref is None or idx_cmp is None:
            continue
        # NOTE(review): `idx_ref - idx` indexing into header[0] assumes a
        # specific relationship between comparison order and the inserted
        # version entries — confirm against the specification files.
        header[0].append(
            f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
            f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
        )
        header[1].append(u"")
        header[2].append(u"")
        header[3].append(u"")
        for tst_name, tst_data in tbl_dict.items():
            if not cmp_dict.get(tst_name, None):
                cmp_dict[tst_name] = list()
            ref_data = tst_data.get(idx_ref, None)
            cmp_data = tst_data.get(idx_cmp, None)
            if ref_data is None or cmp_data is None:
                cmp_dict[tst_name].append(float(u'nan'))
            else:
                cmp_dict[tst_name].append(relative_change(ref_data, cmp_data))

    # Build the rows; inserting at position 1 reverses the negative-key
    # order so columns read oldest-to-newest left-to-right. Rows whose
    # last comparison is missing/NaN are collected separately and appended
    # at the end of the table.
    tbl_lst_none = list()
    tbl_lst = list()
    for tst_name, tst_data in tbl_dict.items():
        itm_lst = [tst_data[u"name"], ]
        for idx in range(nr_cols):
            item = tst_data.get(-idx - 1, None)
            if item is None:
                itm_lst.insert(1, None)
            else:
                # Throughput converted to Mpps (value / 1e6), 1 decimal.
                itm_lst.insert(1, round(item / 1e6, 1))
        itm_lst.extend(
            [
                None if itm is None else round(itm, 1)
                for itm in cmp_dict[tst_name]
            ]
        )
        if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
            tbl_lst_none.append(itm_lst)
        else:
            tbl_lst.append(itm_lst)

    # Sort by name, then (stable) by the last comparison ascending.
    tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_lst.extend(tbl_lst_none)

    # Generate csv table:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        for hdr in header:
            file_handler.write(u",".join(hdr) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join(
                [
                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
                    replace(u"null", u"-") for item in test
                ]
            ) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    try:
        convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
    except Exception as err:
        # Deliberately broad: a failed pretty-print falls back to dumping
        # the table into the log instead of aborting the run.
        logging.error(repr(err))
        for hdr in header:
            logging.info(",".join(hdr))
        for test in tbl_lst:
            logging.info(",".join(
                [
                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
                    replace(u"null", u"-") for item in test
                ]
            ))

    # Reorganize header in txt table
    txt_table = list()
    try:
        with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
            for line in list(file_handler):
                txt_table.append(line)
        # Move line 2 (a header line) below line 4 — presumably to place
        # the separator correctly in the prettytable output; verify
        # against convert_csv_to_pretty_txt's layout.
        txt_table.insert(5, txt_table.pop(2))
        with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
            file_handler.writelines(txt_table)
    except FileNotFoundError as err:
        logging.error(repr(err))
    except IndexError:
        # Fewer than 3 lines in the txt file: nothing to reorder.
        pass

    # Generate html table:
    hdr_html = [
        u"<br>".join(row) for row in zip(*header)
    ]
    _tpc_generate_html_table(
        hdr_html,
        tbl_lst,
        table[u'output-file'],
        sort_data=True,
        title=table.get(u"title", u""),
        generate_rst=False
    )