Trending: Add ethip4ipsecNspe tests for 2n-tx2
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import math
21 import re
22
23 from collections import OrderedDict
24 from xml.etree import ElementTree as ET
25 from datetime import datetime as dt
26 from datetime import timedelta
27 from copy import deepcopy
28
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
31 import pandas as pd
32 import prettytable
33
34 from numpy import nan, isnan
35 from yaml import load, FullLoader, YAMLError
36
37 from pal_utils import mean, stdev, classify_anomalies, \
38     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
39
40
41 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
42
43
44 def generate_tables(spec, data):
45     """Generate all tables specified in the specification file.
46
47     :param spec: Specification read from the specification file.
48     :param data: Data to process.
49     :type spec: Specification
50     :type data: InputData
51     """
52
53     generator = {
54         u"table_merged_details": table_merged_details,
55         u"table_soak_vs_ndr": table_soak_vs_ndr,
56         u"table_perf_trending_dash": table_perf_trending_dash,
57         u"table_perf_trending_dash_html": table_perf_trending_dash_html,
58         u"table_last_failed_tests": table_last_failed_tests,
59         u"table_failed_tests": table_failed_tests,
60         u"table_failed_tests_html": table_failed_tests_html,
61         u"table_oper_data_html": table_oper_data_html,
62         u"table_comparison": table_comparison,
63         u"table_weekly_comparison": table_weekly_comparison,
64         u"table_job_spec_duration": table_job_spec_duration
65     }
66
67     logging.info(u"Generating the tables ...")
68     for table in spec.tables:
69         try:
70             if table[u"algorithm"] == u"table_weekly_comparison":
71                 table[u"testbeds"] = spec.environment.get(u"testbeds", None)
72             generator[table[u"algorithm"]](table, data)
73         except NameError as err:
74             logging.error(
75                 f"Probably algorithm {table[u'algorithm']} is not defined: "
76                 f"{repr(err)}"
77             )
78     logging.info(u"Done.")
79
80
81 def table_job_spec_duration(table, input_data):
82     """Generate the table(s) with algorithm: table_job_spec_duration
83     specified in the specification file.
84
85     :param table: Table to generate.
86     :param input_data: Data to process.
87     :type table: pandas.Series
88     :type input_data: InputData
89     """
90
91     _ = input_data
92
93     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
94
95     jb_type = table.get(u"jb-type", None)
96
97     tbl_lst = list()
98     if jb_type == u"iterative":
99         for line in table.get(u"lines", tuple()):
100             tbl_itm = {
101                 u"name": line.get(u"job-spec", u""),
102                 u"data": list()
103             }
104             for job, builds in line.get(u"data-set", dict()).items():
105                 for build_nr in builds:
106                     try:
107                         minutes = input_data.metadata(
108                             job, str(build_nr)
109                         )[u"elapsedtime"] // 60000
110                     except (KeyError, IndexError, ValueError, AttributeError):
111                         continue
112                     tbl_itm[u"data"].append(minutes)
113             tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
114             tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
115             tbl_lst.append(tbl_itm)
116     elif jb_type == u"coverage":
117         job = table.get(u"data", None)
118         if not job:
119             return
120         for line in table.get(u"lines", tuple()):
121             try:
122                 tbl_itm = {
123                     u"name": line.get(u"job-spec", u""),
124                     u"mean": input_data.metadata(
125                         list(job.keys())[0], str(line[u"build"])
126                     )[u"elapsedtime"] // 60000,
127                     u"stdev": float(u"nan")
128                 }
129                 tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
130             except (KeyError, IndexError, ValueError, AttributeError):
131                 continue
132             tbl_lst.append(tbl_itm)
133     else:
134         logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
135         return
136
137     for line in tbl_lst:
138         line[u"mean"] = \
139             f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
140         if math.isnan(line[u"stdev"]):
141             line[u"stdev"] = u""
142         else:
143             line[u"stdev"] = \
144                 f"{int(line[u'stdev'] //60):02d}:{int(line[u'stdev'] % 60):02d}"
145
146     if not tbl_lst:
147         return
148
149     rows = list()
150     for itm in tbl_lst:
151         rows.append([
152             itm[u"name"],
153             f"{len(itm[u'data'])}",
154             f"{itm[u'mean']} +- {itm[u'stdev']}"
155             if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
156         ])
157
158     txt_table = prettytable.PrettyTable(
159         [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
160     )
161     for row in rows:
162         txt_table.add_row(row)
163     txt_table.align = u"r"
164     txt_table.align[u"Job Specification"] = u"l"
165
166     file_name = f"{table.get(u'output-file', u'')}.txt"
167     with open(file_name, u"wt", encoding='utf-8') as txt_file:
168         txt_file.write(str(txt_table))
169
170
171 def table_oper_data_html(table, input_data):
172     """Generate the table(s) with algorithm: html_table_oper_data
173     specified in the specification file.
174
175     :param table: Table to generate.
176     :param input_data: Data to process.
177     :type table: pandas.Series
178     :type input_data: InputData
179     """
180
181     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
182     # Transform the data
183     logging.info(
184         f"    Creating the data set for the {table.get(u'type', u'')} "
185         f"{table.get(u'title', u'')}."
186     )
187     data = input_data.filter_data(
188         table,
189         params=[u"name", u"parent", u"telemetry-show-run", u"type"],
190         continue_on_error=True
191     )
192     if data.empty:
193         return
194     data = input_data.merge_data(data)
195
196     sort_tests = table.get(u"sort", None)
197     if sort_tests:
198         args = dict(
199             inplace=True,
200             ascending=(sort_tests == u"ascending")
201         )
202         data.sort_index(**args)
203
204     suites = input_data.filter_data(
205         table,
206         continue_on_error=True,
207         data_set=u"suites"
208     )
209     if suites.empty:
210         return
211     suites = input_data.merge_data(suites)
212
213     def _generate_html_table(tst_data):
214         """Generate an HTML table with operational data for the given test.
215
216         :param tst_data: Test data to be used to generate the table.
217         :type tst_data: pandas.Series
218         :returns: HTML table with operational data.
219         :rtype: str
220         """
221
222         colors = {
223             u"header": u"#7eade7",
224             u"empty": u"#ffffff",
225             u"body": (u"#e9f1fb", u"#d4e4f7")
226         }
227
228         tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
229
230         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
231         thead = ET.SubElement(
232             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
233         )
234         thead.text = tst_data[u"name"]
235
236         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
237         thead = ET.SubElement(
238             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
239         )
240         thead.text = u"\t"
241
242         if tst_data.get(u"telemetry-show-run", None) is None or \
243                 isinstance(tst_data[u"telemetry-show-run"], str):
244             trow = ET.SubElement(
245                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
246             )
247             tcol = ET.SubElement(
248                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
249             )
250             tcol.text = u"No Data"
251
252             trow = ET.SubElement(
253                 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
254             )
255             thead = ET.SubElement(
256                 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
257             )
258             font = ET.SubElement(
259                 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
260             )
261             font.text = u"."
262             return str(ET.tostring(tbl, encoding=u"unicode"))
263
264         tbl_hdr = (
265             u"Name",
266             u"Nr of Vectors",
267             u"Nr of Packets",
268             u"Suspends",
269             u"Cycles per Packet",
270             u"Average Vector Size"
271         )
272
273         for dut_data in tst_data[u"telemetry-show-run"].values():
274             trow = ET.SubElement(
275                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
276             )
277             tcol = ET.SubElement(
278                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
279             )
280             if dut_data.get(u"runtime", None) is None:
281                 tcol.text = u"No Data"
282                 continue
283
284             runtime = dict()
285             for item in dut_data[u"runtime"].get(u"data", tuple()):
286                 tid = int(item[u"labels"][u"thread_id"])
287                 if runtime.get(tid, None) is None:
288                     runtime[tid] = dict()
289                 gnode = item[u"labels"][u"graph_node"]
290                 if runtime[tid].get(gnode, None) is None:
291                     runtime[tid][gnode] = dict()
292                 try:
293                     runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
294                 except ValueError:
295                     runtime[tid][gnode][item[u"name"]] = item[u"value"]
296
297             threads = dict({idx: list() for idx in range(len(runtime))})
298             for idx, run_data in runtime.items():
299                 for gnode, gdata in run_data.items():
300                     if gdata[u"vectors"] > 0:
301                         clocks = gdata[u"clocks"] / gdata[u"vectors"]
302                     elif gdata[u"calls"] > 0:
303                         clocks = gdata[u"clocks"] / gdata[u"calls"]
304                     elif gdata[u"suspends"] > 0:
305                         clocks = gdata[u"clocks"] / gdata[u"suspends"]
306                     else:
307                         clocks = 0.0
308                     if gdata[u"calls"] > 0:
309                         vectors_call = gdata[u"vectors"] / gdata[u"calls"]
310                     else:
311                         vectors_call = 0.0
312                     if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
313                             int(gdata[u"suspends"]):
314                         threads[idx].append([
315                             gnode,
316                             int(gdata[u"calls"]),
317                             int(gdata[u"vectors"]),
318                             int(gdata[u"suspends"]),
319                             clocks,
320                             vectors_call
321                         ])
322
323             bold = ET.SubElement(tcol, u"b")
324             bold.text = (
325                 f"Host IP: {dut_data.get(u'host', '')}, "
326                 f"Socket: {dut_data.get(u'socket', '')}"
327             )
328             trow = ET.SubElement(
329                 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
330             )
331             thead = ET.SubElement(
332                 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
333             )
334             thead.text = u"\t"
335
336             for thread_nr, thread in threads.items():
337                 trow = ET.SubElement(
338                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
339                 )
340                 tcol = ET.SubElement(
341                     trow, u"td", attrib=dict(align=u"left", colspan=u"6")
342                 )
343                 bold = ET.SubElement(tcol, u"b")
344                 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
345                 trow = ET.SubElement(
346                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
347                 )
348                 for idx, col in enumerate(tbl_hdr):
349                     tcol = ET.SubElement(
350                         trow, u"td",
351                         attrib=dict(align=u"right" if idx else u"left")
352                     )
353                     font = ET.SubElement(
354                         tcol, u"font", attrib=dict(size=u"2")
355                     )
356                     bold = ET.SubElement(font, u"b")
357                     bold.text = col
358                 for row_nr, row in enumerate(thread):
359                     trow = ET.SubElement(
360                         tbl, u"tr",
361                         attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
362                     )
363                     for idx, col in enumerate(row):
364                         tcol = ET.SubElement(
365                             trow, u"td",
366                             attrib=dict(align=u"right" if idx else u"left")
367                         )
368                         font = ET.SubElement(
369                             tcol, u"font", attrib=dict(size=u"2")
370                         )
371                         if isinstance(col, float):
372                             font.text = f"{col:.2f}"
373                         else:
374                             font.text = str(col)
375                 trow = ET.SubElement(
376                     tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
377                 )
378                 thead = ET.SubElement(
379                     trow, u"th", attrib=dict(align=u"left", colspan=u"6")
380                 )
381                 thead.text = u"\t"
382
383         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
384         thead = ET.SubElement(
385             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
386         )
387         font = ET.SubElement(
388             thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
389         )
390         font.text = u"."
391
392         return str(ET.tostring(tbl, encoding=u"unicode"))
393
394     for suite in suites.values:
395         html_table = str()
396         for test_data in data.values:
397             if test_data[u"parent"] not in suite[u"name"]:
398                 continue
399             html_table += _generate_html_table(test_data)
400         if not html_table:
401             continue
402         try:
403             file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
404             with open(f"{file_name}", u'w') as html_file:
405                 logging.info(f"    Writing file: {file_name}")
406                 html_file.write(u".. raw:: html\n\n\t")
407                 html_file.write(html_table)
408                 html_file.write(u"\n\t<p><br><br></p>\n")
409         except KeyError:
410             logging.warning(u"The output file is not defined.")
411             return
412     logging.info(u"  Done.")
413
414
415 def table_merged_details(table, input_data):
416     """Generate the table(s) with algorithm: table_merged_details
417     specified in the specification file.
418
419     :param table: Table to generate.
420     :param input_data: Data to process.
421     :type table: pandas.Series
422     :type input_data: InputData
423     """
424
425     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
426
427     # Transform the data
428     logging.info(
429         f"    Creating the data set for the {table.get(u'type', u'')} "
430         f"{table.get(u'title', u'')}."
431     )
432     data = input_data.filter_data(table, continue_on_error=True)
433     data = input_data.merge_data(data)
434
435     sort_tests = table.get(u"sort", None)
436     if sort_tests:
437         args = dict(
438             inplace=True,
439             ascending=(sort_tests == u"ascending")
440         )
441         data.sort_index(**args)
442
443     suites = input_data.filter_data(
444         table, continue_on_error=True, data_set=u"suites")
445     suites = input_data.merge_data(suites)
446
447     # Prepare the header of the tables
448     header = list()
449     for column in table[u"columns"]:
450         header.append(
451             u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
452         )
453
454     for suite in suites.values:
455         # Generate data
456         suite_name = suite[u"name"]
457         table_lst = list()
458         for test in data.keys():
459             if data[test][u"status"] != u"PASS" or \
460                     data[test][u"parent"] not in suite_name:
461                 continue
462             row_lst = list()
463             for column in table[u"columns"]:
464                 try:
465                     col_data = str(data[test][column[
466                         u"data"].split(u" ")[1]]).replace(u'"', u'""')
467                     # Do not include tests with "Test Failed" in test message
468                     if u"Test Failed" in col_data:
469                         continue
470                     col_data = col_data.replace(
471                         u"No Data", u"Not Captured     "
472                     )
473                     if column[u"data"].split(u" ")[1] in (u"name", ):
474                         if len(col_data) > 30:
475                             col_data_lst = col_data.split(u"-")
476                             half = int(len(col_data_lst) / 2)
477                             col_data = f"{u'-'.join(col_data_lst[:half])}" \
478                                        f"- |br| " \
479                                        f"{u'-'.join(col_data_lst[half:])}"
480                         col_data = f" |prein| {col_data} |preout| "
481                     elif column[u"data"].split(u" ")[1] in (u"msg", ):
482                         # Temporary solution: remove NDR results from message:
483                         if bool(table.get(u'remove-ndr', False)):
484                             try:
485                                 col_data = col_data.split(u"\n", 1)[1]
486                             except IndexError:
487                                 pass
488                         col_data = col_data.replace(u'\n', u' |br| ').\
489                             replace(u'\r', u'').replace(u'"', u"'")
490                         col_data = f" |prein| {col_data} |preout| "
491                     elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
492                         col_data = col_data.replace(u'\n', u' |br| ')
493                         col_data = f" |prein| {col_data[:-5]} |preout| "
494                     row_lst.append(f'"{col_data}"')
495                 except KeyError:
496                     row_lst.append(u'"Not captured"')
497             if len(row_lst) == len(table[u"columns"]):
498                 table_lst.append(row_lst)
499
500         # Write the data to file
501         if table_lst:
502             separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
503             file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
504             logging.info(f"      Writing file: {file_name}")
505             with open(file_name, u"wt") as file_handler:
506                 file_handler.write(u",".join(header) + u"\n")
507                 for item in table_lst:
508                     file_handler.write(u",".join(item) + u"\n")
509
510     logging.info(u"  Done.")
511
512
513 def _tpc_modify_test_name(test_name, ignore_nic=False):
514     """Modify a test name by replacing its parts.
515
516     :param test_name: Test name to be modified.
517     :param ignore_nic: If True, NIC is removed from TC name.
518     :type test_name: str
519     :type ignore_nic: bool
520     :returns: Modified test name.
521     :rtype: str
522     """
523     test_name_mod = test_name.\
524         replace(u"-ndrpdr", u"").\
525         replace(u"1t1c", u"1c").\
526         replace(u"2t1c", u"1c"). \
527         replace(u"2t2c", u"2c").\
528         replace(u"4t2c", u"2c"). \
529         replace(u"4t4c", u"4c").\
530         replace(u"8t4c", u"4c")
531
532     if ignore_nic:
533         return re.sub(REGEX_NIC, u"", test_name_mod)
534     return test_name_mod
535
536
537 def _tpc_modify_displayed_test_name(test_name):
538     """Modify a test name which is displayed in a table by replacing its parts.
539
540     :param test_name: Test name to be modified.
541     :type test_name: str
542     :returns: Modified test name.
543     :rtype: str
544     """
545     return test_name.\
546         replace(u"1t1c", u"1c").\
547         replace(u"2t1c", u"1c"). \
548         replace(u"2t2c", u"2c").\
549         replace(u"4t2c", u"2c"). \
550         replace(u"4t4c", u"4c").\
551         replace(u"8t4c", u"4c")
552
553
554 def _tpc_insert_data(target, src, include_tests):
555     """Insert src data to the target structure.
556
557     :param target: Target structure where the data is placed.
558     :param src: Source data to be placed into the target structure.
559     :param include_tests: Which results will be included (MRR, NDR, PDR).
560     :type target: list
561     :type src: dict
562     :type include_tests: str
563     """
564     try:
565         if include_tests == u"MRR":
566             target[u"mean"] = src[u"result"][u"receive-rate"]
567             target[u"stdev"] = src[u"result"][u"receive-stdev"]
568         elif include_tests == u"PDR":
569             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
570         elif include_tests == u"NDR":
571             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
572         elif u"latency" in include_tests:
573             keys = include_tests.split(u"-")
574             if len(keys) == 4:
575                 lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
576                 target[u"data"].append(
577                     float(u"nan") if lat == -1 else lat * 1e6
578                 )
579     except (KeyError, TypeError):
580         pass
581
582
583 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
584                              footnote=u"", sort_data=True, title=u"",
585                              generate_rst=True):
586     """Generate html table from input data with simple sorting possibility.
587
588     :param header: Table header.
589     :param data: Input data to be included in the table. It is a list of lists.
590         Inner lists are rows in the table. All inner lists must be of the same
591         length. The length of these lists must be the same as the length of the
592         header.
593     :param out_file_name: The name (relative or full path) where the
594         generated html table is written.
595     :param legend: The legend to display below the table.
596     :param footnote: The footnote to display below the table (and legend).
597     :param sort_data: If True the data sorting is enabled.
598     :param title: The table (and file) title.
599     :param generate_rst: If True, wrapping rst file is generated.
600     :type header: list
601     :type data: list of lists
602     :type out_file_name: str
603     :type legend: str
604     :type footnote: str
605     :type sort_data: bool
606     :type title: str
607     :type generate_rst: bool
608     """
609
610     try:
611         idx = header.index(u"Test Case")
612     except ValueError:
613         idx = 0
614     params = {
615         u"align-hdr": (
616             [u"left", u"right"],
617             [u"left", u"left", u"right"],
618             [u"left", u"left", u"left", u"right"]
619         ),
620         u"align-itm": (
621             [u"left", u"right"],
622             [u"left", u"left", u"right"],
623             [u"left", u"left", u"left", u"right"]
624         ),
625         u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
626     }
627
628     df_data = pd.DataFrame(data, columns=header)
629
630     if sort_data:
631         df_sorted = [df_data.sort_values(
632             by=[key, header[idx]], ascending=[True, True]
633             if key != header[idx] else [False, True]) for key in header]
634         df_sorted_rev = [df_data.sort_values(
635             by=[key, header[idx]], ascending=[False, True]
636             if key != header[idx] else [True, True]) for key in header]
637         df_sorted.extend(df_sorted_rev)
638     else:
639         df_sorted = df_data
640
641     fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
642                    for idx in range(len(df_data))]]
643     table_header = dict(
644         values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
645         fill_color=u"#7eade7",
646         align=params[u"align-hdr"][idx],
647         font=dict(
648             family=u"Courier New",
649             size=12
650         )
651     )
652
653     fig = go.Figure()
654
655     if sort_data:
656         for table in df_sorted:
657             columns = [table.get(col) for col in header]
658             fig.add_trace(
659                 go.Table(
660                     columnwidth=params[u"width"][idx],
661                     header=table_header,
662                     cells=dict(
663                         values=columns,
664                         fill_color=fill_color,
665                         align=params[u"align-itm"][idx],
666                         font=dict(
667                             family=u"Courier New",
668                             size=12
669                         )
670                     )
671                 )
672             )
673
674         buttons = list()
675         menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
676         menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
677         for idx, hdr in enumerate(menu_items):
678             visible = [False, ] * len(menu_items)
679             visible[idx] = True
680             buttons.append(
681                 dict(
682                     label=hdr.replace(u" [Mpps]", u""),
683                     method=u"update",
684                     args=[{u"visible": visible}],
685                 )
686             )
687
688         fig.update_layout(
689             updatemenus=[
690                 go.layout.Updatemenu(
691                     type=u"dropdown",
692                     direction=u"down",
693                     x=0.0,
694                     xanchor=u"left",
695                     y=1.002,
696                     yanchor=u"bottom",
697                     active=len(menu_items) - 1,
698                     buttons=list(buttons)
699                 )
700             ],
701         )
702     else:
703         fig.add_trace(
704             go.Table(
705                 columnwidth=params[u"width"][idx],
706                 header=table_header,
707                 cells=dict(
708                     values=[df_sorted.get(col) for col in header],
709                     fill_color=fill_color,
710                     align=params[u"align-itm"][idx],
711                     font=dict(
712                         family=u"Courier New",
713                         size=12
714                     )
715                 )
716             )
717         )
718
719     ploff.plot(
720         fig,
721         show_link=False,
722         auto_open=False,
723         filename=f"{out_file_name}_in.html"
724     )
725
726     if not generate_rst:
727         return
728
729     file_name = out_file_name.split(u"/")[-1]
730     if u"vpp" in out_file_name:
731         path = u"_tmp/src/vpp_performance_tests/comparisons/"
732     else:
733         path = u"_tmp/src/dpdk_performance_tests/comparisons/"
734     logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
735     with open(f"{path}{file_name}.rst", u"wt") as rst_file:
736         rst_file.write(
737             u"\n"
738             u".. |br| raw:: html\n\n    <br />\n\n\n"
739             u".. |prein| raw:: html\n\n    <pre>\n\n\n"
740             u".. |preout| raw:: html\n\n    </pre>\n\n"
741         )
742         if title:
743             rst_file.write(f"{title}\n")
744             rst_file.write(f"{u'`' * len(title)}\n\n")
745         rst_file.write(
746             u".. raw:: html\n\n"
747             f'    <iframe frameborder="0" scrolling="no" '
748             f'width="1600" height="1200" '
749             f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
750             f'</iframe>\n\n'
751         )
752
753         if legend:
754             try:
755                 itm_lst = legend[1:-2].split(u"\n")
756                 rst_file.write(
757                     f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
758                 )
759             except IndexError as err:
760                 logging.error(f"Legend cannot be written to html file\n{err}")
761         if footnote:
762             try:
763                 itm_lst = footnote[1:].split(u"\n")
764                 rst_file.write(
765                     f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
766                 )
767             except IndexError as err:
768                 logging.error(f"Footnote cannot be written to html file\n{err}")
769
770
771 def table_soak_vs_ndr(table, input_data):
772     """Generate the table(s) with algorithm: table_soak_vs_ndr
773     specified in the specification file.
774
775     :param table: Table to generate.
776     :param input_data: Data to process.
777     :type table: pandas.Series
778     :type input_data: InputData
779     """
780
781     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
782
783     # Transform the data
784     logging.info(
785         f"    Creating the data set for the {table.get(u'type', u'')} "
786         f"{table.get(u'title', u'')}."
787     )
788     data = input_data.filter_data(table, continue_on_error=True)
789
790     # Prepare the header of the table
791     try:
792         header = [
793             u"Test Case",
794             f"Avg({table[u'reference'][u'title']})",
795             f"Stdev({table[u'reference'][u'title']})",
796             f"Avg({table[u'compare'][u'title']})",
797             f"Stdev{table[u'compare'][u'title']})",
798             u"Diff",
799             u"Stdev(Diff)"
800         ]
801         header_str = u";".join(header) + u"\n"
802         legend = (
803             u"\nLegend:\n"
804             f"Avg({table[u'reference'][u'title']}): "
805             f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
806             f"from a series of runs of the listed tests.\n"
807             f"Stdev({table[u'reference'][u'title']}): "
808             f"Standard deviation value of {table[u'reference'][u'title']} "
809             f"[Mpps] computed from a series of runs of the listed tests.\n"
810             f"Avg({table[u'compare'][u'title']}): "
811             f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
812             f"a series of runs of the listed tests.\n"
813             f"Stdev({table[u'compare'][u'title']}): "
814             f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
815             f"computed from a series of runs of the listed tests.\n"
816             f"Diff({table[u'reference'][u'title']},"
817             f"{table[u'compare'][u'title']}): "
818             f"Percentage change calculated for mean values.\n"
819             u"Stdev(Diff): "
820             u"Standard deviation of percentage change calculated for mean "
821             u"values."
822         )
823     except (AttributeError, KeyError) as err:
824         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
825         return
826
827     # Create a list of available SOAK test results:
828     tbl_dict = dict()
829     for job, builds in table[u"compare"][u"data"].items():
830         for build in builds:
831             for tst_name, tst_data in data[job][str(build)].items():
832                 if tst_data[u"type"] == u"SOAK":
833                     tst_name_mod = tst_name.replace(u"-soak", u"")
834                     if tbl_dict.get(tst_name_mod, None) is None:
835                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
836                         nic = groups.group(0) if groups else u""
837                         name = (
838                             f"{nic}-"
839                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
840                         )
841                         tbl_dict[tst_name_mod] = {
842                             u"name": name,
843                             u"ref-data": list(),
844                             u"cmp-data": list()
845                         }
846                     try:
847                         tbl_dict[tst_name_mod][u"cmp-data"].append(
848                             tst_data[u"throughput"][u"LOWER"])
849                     except (KeyError, TypeError):
850                         pass
851     tests_lst = tbl_dict.keys()
852
853     # Add corresponding NDR test results:
854     for job, builds in table[u"reference"][u"data"].items():
855         for build in builds:
856             for tst_name, tst_data in data[job][str(build)].items():
857                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
858                     replace(u"-mrr", u"")
859                 if tst_name_mod not in tests_lst:
860                     continue
861                 try:
862                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
863                         continue
864                     if table[u"include-tests"] == u"MRR":
865                         result = (tst_data[u"result"][u"receive-rate"],
866                                   tst_data[u"result"][u"receive-stdev"])
867                     elif table[u"include-tests"] == u"PDR":
868                         result = \
869                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
870                     elif table[u"include-tests"] == u"NDR":
871                         result = \
872                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
873                     else:
874                         result = None
875                     if result is not None:
876                         tbl_dict[tst_name_mod][u"ref-data"].append(
877                             result)
878                 except (KeyError, TypeError):
879                     continue
880
881     tbl_lst = list()
882     for tst_name in tbl_dict:
883         item = [tbl_dict[tst_name][u"name"], ]
884         data_r = tbl_dict[tst_name][u"ref-data"]
885         if data_r:
886             if table[u"include-tests"] == u"MRR":
887                 data_r_mean = data_r[0][0]
888                 data_r_stdev = data_r[0][1]
889             else:
890                 data_r_mean = mean(data_r)
891                 data_r_stdev = stdev(data_r)
892             item.append(round(data_r_mean / 1e6, 1))
893             item.append(round(data_r_stdev / 1e6, 1))
894         else:
895             data_r_mean = None
896             data_r_stdev = None
897             item.extend([None, None])
898         data_c = tbl_dict[tst_name][u"cmp-data"]
899         if data_c:
900             if table[u"include-tests"] == u"MRR":
901                 data_c_mean = data_c[0][0]
902                 data_c_stdev = data_c[0][1]
903             else:
904                 data_c_mean = mean(data_c)
905                 data_c_stdev = stdev(data_c)
906             item.append(round(data_c_mean / 1e6, 1))
907             item.append(round(data_c_stdev / 1e6, 1))
908         else:
909             data_c_mean = None
910             data_c_stdev = None
911             item.extend([None, None])
912         if data_r_mean is not None and data_c_mean is not None:
913             delta, d_stdev = relative_change_stdev(
914                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
915             try:
916                 item.append(round(delta))
917             except ValueError:
918                 item.append(delta)
919             try:
920                 item.append(round(d_stdev))
921             except ValueError:
922                 item.append(d_stdev)
923             tbl_lst.append(item)
924
925     # Sort the table according to the relative change
926     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
927
928     # Generate csv tables:
929     csv_file_name = f"{table[u'output-file']}.csv"
930     with open(csv_file_name, u"wt") as file_handler:
931         file_handler.write(header_str)
932         for test in tbl_lst:
933             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
934
935     convert_csv_to_pretty_txt(
936         csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
937     )
938     with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
939         file_handler.write(legend)
940
941     # Generate html table:
942     _tpc_generate_html_table(
943         header,
944         tbl_lst,
945         table[u'output-file'],
946         legend=legend,
947         title=table.get(u"title", u"")
948     )
949
950
951 def table_perf_trending_dash(table, input_data):
952     """Generate the table(s) with algorithm:
953     table_perf_trending_dash
954     specified in the specification file.
955
956     :param table: Table to generate.
957     :param input_data: Data to process.
958     :type table: pandas.Series
959     :type input_data: InputData
960     """
961
962     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
963
964     # Transform the data
965     logging.info(
966         f"    Creating the data set for the {table.get(u'type', u'')} "
967         f"{table.get(u'title', u'')}."
968     )
969     data = input_data.filter_data(table, continue_on_error=True)
970
971     # Prepare the header of the tables
972     header = [
973         u"Test Case",
974         u"Trend [Mpps]",
975         u"Short-Term Change [%]",
976         u"Long-Term Change [%]",
977         u"Regressions [#]",
978         u"Progressions [#]"
979     ]
980     header_str = u",".join(header) + u"\n"
981
982     incl_tests = table.get(u"include-tests", u"MRR")
983
984     # Prepare data to the table:
985     tbl_dict = dict()
986     for job, builds in table[u"data"].items():
987         for build in builds:
988             for tst_name, tst_data in data[job][str(build)].items():
989                 if tst_name.lower() in table.get(u"ignore-list", list()):
990                     continue
991                 if tbl_dict.get(tst_name, None) is None:
992                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
993                     if not groups:
994                         continue
995                     nic = groups.group(0)
996                     tbl_dict[tst_name] = {
997                         u"name": f"{nic}-{tst_data[u'name']}",
998                         u"data": OrderedDict()
999                     }
1000                 try:
1001                     if incl_tests == u"MRR":
1002                         tbl_dict[tst_name][u"data"][str(build)] = \
1003                             tst_data[u"result"][u"receive-rate"]
1004                     elif incl_tests == u"NDR":
1005                         tbl_dict[tst_name][u"data"][str(build)] = \
1006                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
1007                     elif incl_tests == u"PDR":
1008                         tbl_dict[tst_name][u"data"][str(build)] = \
1009                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
1010                 except (TypeError, KeyError):
1011                     pass  # No data in output.xml for this test
1012
1013     tbl_lst = list()
1014     for tst_name in tbl_dict:
1015         data_t = tbl_dict[tst_name][u"data"]
1016         if len(data_t) < 2:
1017             continue
1018
1019         try:
1020             classification_lst, avgs, _ = classify_anomalies(data_t)
1021         except ValueError as err:
1022             logging.info(f"{err} Skipping")
1023             return
1024
1025         win_size = min(len(data_t), table[u"window"])
1026         long_win_size = min(len(data_t), table[u"long-trend-window"])
1027
1028         try:
1029             max_long_avg = max(
1030                 [x for x in avgs[-long_win_size:-win_size]
1031                  if not isnan(x)])
1032         except ValueError:
1033             max_long_avg = nan
1034         last_avg = avgs[-1]
1035         avg_week_ago = avgs[max(-win_size, -len(avgs))]
1036
1037         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1038             rel_change_last = nan
1039         else:
1040             rel_change_last = round(
1041                 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
1042
1043         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1044             rel_change_long = nan
1045         else:
1046             rel_change_long = round(
1047                 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
1048
1049         if classification_lst:
1050             if isnan(rel_change_last) and isnan(rel_change_long):
1051                 continue
1052             if isnan(last_avg) or isnan(rel_change_last) or \
1053                     isnan(rel_change_long):
1054                 continue
1055             tbl_lst.append(
1056                 [tbl_dict[tst_name][u"name"],
1057                  round(last_avg / 1e6, 2),
1058                  rel_change_last,
1059                  rel_change_long,
1060                  classification_lst[-win_size+1:].count(u"regression"),
1061                  classification_lst[-win_size+1:].count(u"progression")])
1062
1063     tbl_lst.sort(key=lambda rel: rel[0])
1064     tbl_lst.sort(key=lambda rel: rel[3])
1065     tbl_lst.sort(key=lambda rel: rel[2])
1066
1067     tbl_sorted = list()
1068     for nrr in range(table[u"window"], -1, -1):
1069         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1070         for nrp in range(table[u"window"], -1, -1):
1071             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1072             tbl_sorted.extend(tbl_out)
1073
1074     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1075
1076     logging.info(f"    Writing file: {file_name}")
1077     with open(file_name, u"wt") as file_handler:
1078         file_handler.write(header_str)
1079         for test in tbl_sorted:
1080             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1081
1082     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1083     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1084
1085
1086 def _generate_url(testbed, test_name):
1087     """Generate URL to a trending plot from the name of the test case.
1088
1089     :param testbed: The testbed used for testing.
1090     :param test_name: The name of the test case.
1091     :type testbed: str
1092     :type test_name: str
1093     :returns: The URL to the plot with the trending data for the given test
1094         case.
1095     :rtype str
1096     """
1097
1098     if u"x520" in test_name:
1099         nic = u"x520"
1100     elif u"x710" in test_name:
1101         nic = u"x710"
1102     elif u"xl710" in test_name:
1103         nic = u"xl710"
1104     elif u"xxv710" in test_name:
1105         nic = u"xxv710"
1106     elif u"vic1227" in test_name:
1107         nic = u"vic1227"
1108     elif u"vic1385" in test_name:
1109         nic = u"vic1385"
1110     elif u"x553" in test_name:
1111         nic = u"x553"
1112     elif u"cx556" in test_name or u"cx556a" in test_name:
1113         nic = u"cx556a"
1114     elif u"ena" in test_name:
1115         nic = u"nitro50g"
1116     else:
1117         nic = u""
1118
1119     if u"64b" in test_name:
1120         frame_size = u"64b"
1121     elif u"78b" in test_name:
1122         frame_size = u"78b"
1123     elif u"imix" in test_name:
1124         frame_size = u"imix"
1125     elif u"9000b" in test_name:
1126         frame_size = u"9000b"
1127     elif u"1518b" in test_name:
1128         frame_size = u"1518b"
1129     elif u"114b" in test_name:
1130         frame_size = u"114b"
1131     else:
1132         frame_size = u""
1133
1134     if u"1t1c" in test_name or \
1135         (u"-1c-" in test_name and
1136          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1137         cores = u"1t1c"
1138     elif u"2t2c" in test_name or \
1139          (u"-2c-" in test_name and
1140           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1141         cores = u"2t2c"
1142     elif u"4t4c" in test_name or \
1143          (u"-4c-" in test_name and
1144           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1145         cores = u"4t4c"
1146     elif u"2t1c" in test_name or \
1147          (u"-1c-" in test_name and
1148           testbed in
1149           (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2", u"2n-aws", u"3n-aws")):
1150         cores = u"2t1c"
1151     elif u"4t2c" in test_name or \
1152          (u"-2c-" in test_name and
1153           testbed in
1154           (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2", u"2n-aws", u"3n-aws")):
1155         cores = u"4t2c"
1156     elif u"8t4c" in test_name or \
1157          (u"-4c-" in test_name and
1158           testbed in
1159           (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2", u"2n-aws", u"3n-aws")):
1160         cores = u"8t4c"
1161     else:
1162         cores = u""
1163
1164     if u"testpmd" in test_name:
1165         driver = u"testpmd"
1166     elif u"l3fwd" in test_name:
1167         driver = u"l3fwd"
1168     elif u"avf" in test_name:
1169         driver = u"avf"
1170     elif u"af-xdp" in test_name or u"af_xdp" in test_name:
1171         driver = u"af_xdp"
1172     elif u"rdma" in test_name:
1173         driver = u"rdma"
1174     elif u"dnv" in testbed or u"tsh" in testbed:
1175         driver = u"ixgbe"
1176     elif u"ena" in test_name:
1177         driver = u"ena"
1178     else:
1179         driver = u"dpdk"
1180
1181     if u"macip-iacl1s" in test_name:
1182         bsf = u"features-macip-iacl1"
1183     elif u"macip-iacl10s" in test_name:
1184         bsf = u"features-macip-iacl10"
1185     elif u"macip-iacl50s" in test_name:
1186         bsf = u"features-macip-iacl50"
1187     elif u"iacl1s" in test_name:
1188         bsf = u"features-iacl1"
1189     elif u"iacl10s" in test_name:
1190         bsf = u"features-iacl10"
1191     elif u"iacl50s" in test_name:
1192         bsf = u"features-iacl50"
1193     elif u"oacl1s" in test_name:
1194         bsf = u"features-oacl1"
1195     elif u"oacl10s" in test_name:
1196         bsf = u"features-oacl10"
1197     elif u"oacl50s" in test_name:
1198         bsf = u"features-oacl50"
1199     elif u"nat44det" in test_name:
1200         bsf = u"nat44det-bidir"
1201     elif u"nat44ed" in test_name and u"udir" in test_name:
1202         bsf = u"nat44ed-udir"
1203     elif u"-cps" in test_name and u"ethip4udp" in test_name:
1204         bsf = u"udp-cps"
1205     elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1206         bsf = u"tcp-cps"
1207     elif u"-pps" in test_name and u"ethip4udp" in test_name:
1208         bsf = u"udp-pps"
1209     elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1210         bsf = u"tcp-pps"
1211     elif u"-tput" in test_name and u"ethip4udp" in test_name:
1212         bsf = u"udp-tput"
1213     elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1214         bsf = u"tcp-tput"
1215     elif u"udpsrcscale" in test_name:
1216         bsf = u"features-udp"
1217     elif u"iacl" in test_name:
1218         bsf = u"features"
1219     elif u"policer" in test_name:
1220         bsf = u"features"
1221     elif u"adl" in test_name:
1222         bsf = u"features"
1223     elif u"cop" in test_name:
1224         bsf = u"features"
1225     elif u"nat" in test_name:
1226         bsf = u"features"
1227     elif u"macip" in test_name:
1228         bsf = u"features"
1229     elif u"scale" in test_name:
1230         bsf = u"scale"
1231     elif u"base" in test_name:
1232         bsf = u"base"
1233     else:
1234         bsf = u"base"
1235
1236     if u"114b" in test_name and u"vhost" in test_name:
1237         domain = u"vts"
1238     elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1239         domain = u"nat44"
1240         if u"nat44det" in test_name:
1241             domain += u"-det-bidir"
1242         else:
1243             domain += u"-ed"
1244         if u"udir" in test_name:
1245             domain += u"-unidir"
1246         elif u"-ethip4udp-" in test_name:
1247             domain += u"-udp"
1248         elif u"-ethip4tcp-" in test_name:
1249             domain += u"-tcp"
1250         if u"-cps" in test_name:
1251             domain += u"-cps"
1252         elif u"-pps" in test_name:
1253             domain += u"-pps"
1254         elif u"-tput" in test_name:
1255             domain += u"-tput"
1256     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1257         domain = u"dpdk"
1258     elif u"memif" in test_name:
1259         domain = u"container_memif"
1260     elif u"srv6" in test_name:
1261         domain = u"srv6"
1262     elif u"vhost" in test_name:
1263         domain = u"vhost"
1264         if u"vppl2xc" in test_name:
1265             driver += u"-vpp"
1266         else:
1267             driver += u"-testpmd"
1268         if u"lbvpplacp" in test_name:
1269             bsf += u"-link-bonding"
1270     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1271         domain = u"nf_service_density_vnfc"
1272     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1273         domain = u"nf_service_density_cnfc"
1274     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1275         domain = u"nf_service_density_cnfp"
1276     elif u"ipsec" in test_name:
1277         domain = u"ipsec"
1278         if u"sw" in test_name:
1279             bsf += u"-sw"
1280         elif u"hw" in test_name:
1281             bsf += u"-hw"
1282         elif u"spe" in test_name:
1283             bsf += u"-spe"
1284     elif u"ethip4vxlan" in test_name:
1285         domain = u"ip4_tunnels"
1286     elif u"ethip4udpgeneve" in test_name:
1287         domain = u"ip4_tunnels"
1288     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1289         domain = u"ip4"
1290     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1291         domain = u"ip6"
1292     elif u"l2xcbase" in test_name or \
1293             u"l2xcscale" in test_name or \
1294             u"l2bdbasemaclrn" in test_name or \
1295             u"l2bdscale" in test_name or \
1296             u"l2patch" in test_name:
1297         domain = u"l2"
1298     else:
1299         domain = u""
1300
1301     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1302     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1303
1304     return file_name + anchor_name
1305
1306
1307 def table_perf_trending_dash_html(table, input_data):
1308     """Generate the table(s) with algorithm:
1309     table_perf_trending_dash_html specified in the specification
1310     file.
1311
1312     :param table: Table to generate.
1313     :param input_data: Data to process.
1314     :type table: dict
1315     :type input_data: InputData
1316     """
1317
1318     _ = input_data
1319
1320     if not table.get(u"testbed", None):
1321         logging.error(
1322             f"The testbed is not defined for the table "
1323             f"{table.get(u'title', u'')}. Skipping."
1324         )
1325         return
1326
1327     test_type = table.get(u"test-type", u"MRR")
1328     if test_type not in (u"MRR", u"NDR", u"PDR"):
1329         logging.error(
1330             f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
1331             f"Skipping."
1332         )
1333         return
1334
1335     if test_type in (u"NDR", u"PDR"):
1336         lnk_dir = u"../ndrpdr_trending/"
1337         lnk_sufix = f"-{test_type.lower()}"
1338     else:
1339         lnk_dir = u"../trending/"
1340         lnk_sufix = u""
1341
1342     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1343
1344     try:
1345         with open(table[u"input-file"], u'rt') as csv_file:
1346             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1347     except FileNotFoundError as err:
1348         logging.warning(f"{err}")
1349         return
1350     except KeyError:
1351         logging.warning(u"The input file is not defined.")
1352         return
1353     except csv.Error as err:
1354         logging.warning(
1355             f"Not possible to process the file {table[u'input-file']}.\n"
1356             f"{repr(err)}"
1357         )
1358         return
1359
1360     # Table:
1361     dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1362
1363     # Table header:
1364     trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1365     for idx, item in enumerate(csv_lst[0]):
1366         alignment = u"left" if idx == 0 else u"center"
1367         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
1368         thead.text = item
1369
1370     # Rows:
1371     colors = {
1372         u"regression": (
1373             u"#ffcccc",
1374             u"#ff9999"
1375         ),
1376         u"progression": (
1377             u"#c6ecc6",
1378             u"#9fdf9f"
1379         ),
1380         u"normal": (
1381             u"#e9f1fb",
1382             u"#d4e4f7"
1383         )
1384     }
1385     for r_idx, row in enumerate(csv_lst[1:]):
1386         if int(row[4]):
1387             color = u"regression"
1388         elif int(row[5]):
1389             color = u"progression"
1390         else:
1391             color = u"normal"
1392         trow = ET.SubElement(
1393             dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1394         )
1395
1396         # Columns:
1397         for c_idx, item in enumerate(row):
1398             tdata = ET.SubElement(
1399                 trow,
1400                 u"td",
1401                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
1402             )
1403             # Name:
1404             if c_idx == 0 and table.get(u"add-links", True):
1405                 ref = ET.SubElement(
1406                     tdata,
1407                     u"a",
1408                     attrib=dict(
1409                         href=f"{lnk_dir}"
1410                         f"{_generate_url(table.get(u'testbed', ''), item)}"
1411                         f"{lnk_sufix}"
1412                     )
1413                 )
1414                 ref.text = item
1415             else:
1416                 tdata.text = item
1417     try:
1418         with open(table[u"output-file"], u'w') as html_file:
1419             logging.info(f"    Writing file: {table[u'output-file']}")
1420             html_file.write(u".. raw:: html\n\n\t")
1421             html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1422             html_file.write(u"\n\t<p><br><br></p>\n")
1423     except KeyError:
1424         logging.warning(u"The output file is not defined.")
1425         return
1426
1427
1428 def table_last_failed_tests(table, input_data):
1429     """Generate the table(s) with algorithm: table_last_failed_tests
1430     specified in the specification file.
1431
1432     :param table: Table to generate.
1433     :param input_data: Data to process.
1434     :type table: pandas.Series
1435     :type input_data: InputData
1436     """
1437
1438     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1439
1440     # Transform the data
1441     logging.info(
1442         f"    Creating the data set for the {table.get(u'type', u'')} "
1443         f"{table.get(u'title', u'')}."
1444     )
1445
1446     data = input_data.filter_data(table, continue_on_error=True)
1447
1448     if data is None or data.empty:
1449         logging.warning(
1450             f"    No data for the {table.get(u'type', u'')} "
1451             f"{table.get(u'title', u'')}."
1452         )
1453         return
1454
1455     tbl_list = list()
1456     for job, builds in table[u"data"].items():
1457         for build in builds:
1458             build = str(build)
1459             try:
1460                 version = input_data.metadata(job, build).get(u"version", u"")
1461                 duration = \
1462                     input_data.metadata(job, build).get(u"elapsedtime", u"")
1463             except KeyError:
1464                 logging.error(f"Data for {job}: {build} is not present.")
1465                 return
1466             tbl_list.append(build)
1467             tbl_list.append(version)
1468             failed_tests = list()
1469             passed = 0
1470             failed = 0
1471             for tst_data in data[job][build].values:
1472                 if tst_data[u"status"] != u"FAIL":
1473                     passed += 1
1474                     continue
1475                 failed += 1
1476                 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1477                 if not groups:
1478                     continue
1479                 nic = groups.group(0)
1480                 msg = tst_data[u'msg'].replace(u"\n", u"")
1481                 msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
1482                              'xxx.xxx.xxx.xxx', msg)
1483                 msg = msg.split(u'Also teardown failed')[0]
1484                 failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
1485             tbl_list.append(passed)
1486             tbl_list.append(failed)
1487             tbl_list.append(duration)
1488             tbl_list.extend(failed_tests)
1489
1490     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1491     logging.info(f"    Writing file: {file_name}")
1492     with open(file_name, u"wt") as file_handler:
1493         for test in tbl_list:
1494             file_handler.write(f"{test}\n")
1495
1496
1497 def table_failed_tests(table, input_data):
1498     """Generate the table(s) with algorithm: table_failed_tests
1499     specified in the specification file.
1500
1501     :param table: Table to generate.
1502     :param input_data: Data to process.
1503     :type table: pandas.Series
1504     :type input_data: InputData
1505     """
1506
1507     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1508
1509     # Transform the data
1510     logging.info(
1511         f"    Creating the data set for the {table.get(u'type', u'')} "
1512         f"{table.get(u'title', u'')}."
1513     )
1514     data = input_data.filter_data(table, continue_on_error=True)
1515
1516     test_type = u"MRR"
1517     if u"NDRPDR" in table.get(u"filter", list()):
1518         test_type = u"NDRPDR"
1519
1520     # Prepare the header of the tables
1521     header = [
1522         u"Test Case",
1523         u"Failures [#]",
1524         u"Last Failure [Time]",
1525         u"Last Failure [VPP-Build-Id]",
1526         u"Last Failure [CSIT-Job-Build-Id]"
1527     ]
1528
1529     # Generate the data for the table according to the model in the table
1530     # specification
1531
1532     now = dt.utcnow()
1533     timeperiod = timedelta(int(table.get(u"window", 7)))
1534
1535     tbl_dict = dict()
1536     for job, builds in table[u"data"].items():
1537         for build in builds:
1538             build = str(build)
1539             for tst_name, tst_data in data[job][build].items():
1540                 if tst_name.lower() in table.get(u"ignore-list", list()):
1541                     continue
1542                 if tbl_dict.get(tst_name, None) is None:
1543                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1544                     if not groups:
1545                         continue
1546                     nic = groups.group(0)
1547                     tbl_dict[tst_name] = {
1548                         u"name": f"{nic}-{tst_data[u'name']}",
1549                         u"data": OrderedDict()
1550                     }
1551                 try:
1552                     generated = input_data.metadata(job, build).\
1553                         get(u"generated", u"")
1554                     if not generated:
1555                         continue
1556                     then = dt.strptime(generated, u"%Y%m%d %H:%M")
1557                     if (now - then) <= timeperiod:
1558                         tbl_dict[tst_name][u"data"][build] = (
1559                             tst_data[u"status"],
1560                             generated,
1561                             input_data.metadata(job, build).get(u"version",
1562                                                                 u""),
1563                             build
1564                         )
1565                 except (TypeError, KeyError) as err:
1566                     logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
1567
1568     max_fails = 0
1569     tbl_lst = list()
1570     for tst_data in tbl_dict.values():
1571         fails_nr = 0
1572         fails_last_date = u""
1573         fails_last_vpp = u""
1574         fails_last_csit = u""
1575         for val in tst_data[u"data"].values():
1576             if val[0] == u"FAIL":
1577                 fails_nr += 1
1578                 fails_last_date = val[1]
1579                 fails_last_vpp = val[2]
1580                 fails_last_csit = val[3]
1581         if fails_nr:
1582             max_fails = fails_nr if fails_nr > max_fails else max_fails
1583             tbl_lst.append([
1584                 tst_data[u"name"],
1585                 fails_nr,
1586                 fails_last_date,
1587                 fails_last_vpp,
1588                 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1589                 f"-build-{fails_last_csit}"
1590             ])
1591
1592     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1593     tbl_sorted = list()
1594     for nrf in range(max_fails, -1, -1):
1595         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1596         tbl_sorted.extend(tbl_fails)
1597
1598     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1599     logging.info(f"    Writing file: {file_name}")
1600     with open(file_name, u"wt") as file_handler:
1601         file_handler.write(u",".join(header) + u"\n")
1602         for test in tbl_sorted:
1603             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1604
1605     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1606     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1607
1608
1609 def table_failed_tests_html(table, input_data):
1610     """Generate the table(s) with algorithm: table_failed_tests_html
1611     specified in the specification file.
1612
1613     :param table: Table to generate.
1614     :param input_data: Data to process.
1615     :type table: pandas.Series
1616     :type input_data: InputData
1617     """
1618
1619     _ = input_data
1620
1621     if not table.get(u"testbed", None):
1622         logging.error(
1623             f"The testbed is not defined for the table "
1624             f"{table.get(u'title', u'')}. Skipping."
1625         )
1626         return
1627
1628     test_type = table.get(u"test-type", u"MRR")
1629     if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1630         logging.error(
1631             f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
1632             f"Skipping."
1633         )
1634         return
1635
1636     if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1637         lnk_dir = u"../ndrpdr_trending/"
1638         lnk_sufix = u"-pdr"
1639     else:
1640         lnk_dir = u"../trending/"
1641         lnk_sufix = u""
1642
1643     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1644
1645     try:
1646         with open(table[u"input-file"], u'rt') as csv_file:
1647             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1648     except KeyError:
1649         logging.warning(u"The input file is not defined.")
1650         return
1651     except csv.Error as err:
1652         logging.warning(
1653             f"Not possible to process the file {table[u'input-file']}.\n"
1654             f"{repr(err)}"
1655         )
1656         return
1657
1658     # Table:
1659     failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1660
1661     # Table header:
1662     trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1663     for idx, item in enumerate(csv_lst[0]):
1664         alignment = u"left" if idx == 0 else u"center"
1665         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
1666         thead.text = item
1667
1668     # Rows:
1669     colors = (u"#e9f1fb", u"#d4e4f7")
1670     for r_idx, row in enumerate(csv_lst[1:]):
1671         background = colors[r_idx % 2]
1672         trow = ET.SubElement(
1673             failed_tests, u"tr", attrib=dict(bgcolor=background)
1674         )
1675
1676         # Columns:
1677         for c_idx, item in enumerate(row):
1678             tdata = ET.SubElement(
1679                 trow,
1680                 u"td",
1681                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
1682             )
1683             # Name:
1684             if c_idx == 0 and table.get(u"add-links", True):
1685                 ref = ET.SubElement(
1686                     tdata,
1687                     u"a",
1688                     attrib=dict(
1689                         href=f"{lnk_dir}"
1690                         f"{_generate_url(table.get(u'testbed', ''), item)}"
1691                         f"{lnk_sufix}"
1692                     )
1693                 )
1694                 ref.text = item
1695             else:
1696                 tdata.text = item
1697     try:
1698         with open(table[u"output-file"], u'w') as html_file:
1699             logging.info(f"    Writing file: {table[u'output-file']}")
1700             html_file.write(u".. raw:: html\n\n\t")
1701             html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1702             html_file.write(u"\n\t<p><br><br></p>\n")
1703     except KeyError:
1704         logging.warning(u"The output file is not defined.")
1705         return
1706
1707
1708 def table_comparison(table, input_data):
1709     """Generate the table(s) with algorithm: table_comparison
1710     specified in the specification file.
1711
1712     :param table: Table to generate.
1713     :param input_data: Data to process.
1714     :type table: pandas.Series
1715     :type input_data: InputData
1716     """
1717     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1718
1719     # Transform the data
1720     logging.info(
1721         f"    Creating the data set for the {table.get(u'type', u'')} "
1722         f"{table.get(u'title', u'')}."
1723     )
1724
1725     columns = table.get(u"columns", None)
1726     if not columns:
1727         logging.error(
1728             f"No columns specified for {table.get(u'title', u'')}. Skipping."
1729         )
1730         return
1731
1732     cols = list()
1733     for idx, col in enumerate(columns):
1734         if col.get(u"data-set", None) is None:
1735             logging.warning(f"No data for column {col.get(u'title', u'')}")
1736             continue
1737         tag = col.get(u"tag", None)
1738         data = input_data.filter_data(
1739             table,
1740             params=[
1741                 u"throughput",
1742                 u"result",
1743                 u"latency",
1744                 u"name",
1745                 u"parent",
1746                 u"tags"
1747             ],
1748             data=col[u"data-set"],
1749             continue_on_error=True
1750         )
1751         col_data = {
1752             u"title": col.get(u"title", f"Column{idx}"),
1753             u"data": dict()
1754         }
1755         for builds in data.values:
1756             for build in builds:
1757                 for tst_name, tst_data in build.items():
1758                     if tag and tag not in tst_data[u"tags"]:
1759                         continue
1760                     tst_name_mod = \
1761                         _tpc_modify_test_name(tst_name, ignore_nic=True).\
1762                         replace(u"2n1l-", u"")
1763                     if col_data[u"data"].get(tst_name_mod, None) is None:
1764                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
1765                         if u"across testbeds" in table[u"title"].lower() or \
1766                                 u"across topologies" in table[u"title"].lower():
1767                             name = _tpc_modify_displayed_test_name(name)
1768                         col_data[u"data"][tst_name_mod] = {
1769                             u"name": name,
1770                             u"replace": True,
1771                             u"data": list(),
1772                             u"mean": None,
1773                             u"stdev": None
1774                         }
1775                     _tpc_insert_data(
1776                         target=col_data[u"data"][tst_name_mod],
1777                         src=tst_data,
1778                         include_tests=table[u"include-tests"]
1779                     )
1780
1781         replacement = col.get(u"data-replacement", None)
1782         if replacement:
1783             rpl_data = input_data.filter_data(
1784                 table,
1785                 params=[
1786                     u"throughput",
1787                     u"result",
1788                     u"latency",
1789                     u"name",
1790                     u"parent",
1791                     u"tags"
1792                 ],
1793                 data=replacement,
1794                 continue_on_error=True
1795             )
1796             for builds in rpl_data.values:
1797                 for build in builds:
1798                     for tst_name, tst_data in build.items():
1799                         if tag and tag not in tst_data[u"tags"]:
1800                             continue
1801                         tst_name_mod = \
1802                             _tpc_modify_test_name(tst_name, ignore_nic=True).\
1803                             replace(u"2n1l-", u"")
1804                         if col_data[u"data"].get(tst_name_mod, None) is None:
1805                             name = tst_data[u'name'].rsplit(u'-', 1)[0]
1806                             if u"across testbeds" in table[u"title"].lower() \
1807                                     or u"across topologies" in \
1808                                     table[u"title"].lower():
1809                                 name = _tpc_modify_displayed_test_name(name)
1810                             col_data[u"data"][tst_name_mod] = {
1811                                 u"name": name,
1812                                 u"replace": False,
1813                                 u"data": list(),
1814                                 u"mean": None,
1815                                 u"stdev": None
1816                             }
1817                         if col_data[u"data"][tst_name_mod][u"replace"]:
1818                             col_data[u"data"][tst_name_mod][u"replace"] = False
1819                             col_data[u"data"][tst_name_mod][u"data"] = list()
1820                         _tpc_insert_data(
1821                             target=col_data[u"data"][tst_name_mod],
1822                             src=tst_data,
1823                             include_tests=table[u"include-tests"]
1824                         )
1825
1826         if table[u"include-tests"] in (u"NDR", u"PDR") or \
1827                 u"latency" in table[u"include-tests"]:
1828             for tst_name, tst_data in col_data[u"data"].items():
1829                 if tst_data[u"data"]:
1830                     tst_data[u"mean"] = mean(tst_data[u"data"])
1831                     tst_data[u"stdev"] = stdev(tst_data[u"data"])
1832
1833         cols.append(col_data)
1834
1835     tbl_dict = dict()
1836     for col in cols:
1837         for tst_name, tst_data in col[u"data"].items():
1838             if tbl_dict.get(tst_name, None) is None:
1839                 tbl_dict[tst_name] = {
1840                     "name": tst_data[u"name"]
1841                 }
1842             tbl_dict[tst_name][col[u"title"]] = {
1843                 u"mean": tst_data[u"mean"],
1844                 u"stdev": tst_data[u"stdev"]
1845             }
1846
1847     if not tbl_dict:
1848         logging.warning(f"No data for table {table.get(u'title', u'')}!")
1849         return
1850
1851     tbl_lst = list()
1852     for tst_data in tbl_dict.values():
1853         row = [tst_data[u"name"], ]
1854         for col in cols:
1855             row.append(tst_data.get(col[u"title"], None))
1856         tbl_lst.append(row)
1857
1858     comparisons = table.get(u"comparisons", None)
1859     rcas = list()
1860     if comparisons and isinstance(comparisons, list):
1861         for idx, comp in enumerate(comparisons):
1862             try:
1863                 col_ref = int(comp[u"reference"])
1864                 col_cmp = int(comp[u"compare"])
1865             except KeyError:
1866                 logging.warning(u"Comparison: No references defined! Skipping.")
1867                 comparisons.pop(idx)
1868                 continue
1869             if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
1870                     col_ref == col_cmp):
1871                 logging.warning(f"Wrong values of reference={col_ref} "
1872                                 f"and/or compare={col_cmp}. Skipping.")
1873                 comparisons.pop(idx)
1874                 continue
1875             rca_file_name = comp.get(u"rca-file", None)
1876             if rca_file_name:
1877                 try:
1878                     with open(rca_file_name, u"r") as file_handler:
1879                         rcas.append(
1880                             {
1881                                 u"title": f"RCA{idx + 1}",
1882                                 u"data": load(file_handler, Loader=FullLoader)
1883                             }
1884                         )
1885                 except (YAMLError, IOError) as err:
1886                     logging.warning(
1887                         f"The RCA file {rca_file_name} does not exist or "
1888                         f"it is corrupted!"
1889                     )
1890                     logging.debug(repr(err))
1891                     rcas.append(None)
1892             else:
1893                 rcas.append(None)
1894     else:
1895         comparisons = None
1896
1897     tbl_cmp_lst = list()
1898     if comparisons:
1899         for row in tbl_lst:
1900             new_row = deepcopy(row)
1901             for comp in comparisons:
1902                 ref_itm = row[int(comp[u"reference"])]
1903                 if ref_itm is None and \
1904                         comp.get(u"reference-alt", None) is not None:
1905                     ref_itm = row[int(comp[u"reference-alt"])]
1906                 cmp_itm = row[int(comp[u"compare"])]
1907                 if ref_itm is not None and cmp_itm is not None and \
1908                         ref_itm[u"mean"] is not None and \
1909                         cmp_itm[u"mean"] is not None and \
1910                         ref_itm[u"stdev"] is not None and \
1911                         cmp_itm[u"stdev"] is not None:
1912                     try:
1913                         delta, d_stdev = relative_change_stdev(
1914                             ref_itm[u"mean"], cmp_itm[u"mean"],
1915                             ref_itm[u"stdev"], cmp_itm[u"stdev"]
1916                         )
1917                     except ZeroDivisionError:
1918                         break
1919                     if delta is None or math.isnan(delta):
1920                         break
1921                     new_row.append({
1922                         u"mean": delta * 1e6,
1923                         u"stdev": d_stdev * 1e6
1924                     })
1925                 else:
1926                     break
1927             else:
1928                 tbl_cmp_lst.append(new_row)
1929
1930     try:
1931         tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1932         tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1933     except TypeError as err:
1934         logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
1935
1936     tbl_for_csv = list()
1937     for line in tbl_cmp_lst:
1938         row = [line[0], ]
1939         for idx, itm in enumerate(line[1:]):
1940             if itm is None or not isinstance(itm, dict) or\
1941                     itm.get(u'mean', None) is None or \
1942                     itm.get(u'stdev', None) is None:
1943                 row.append(u"NT")
1944                 row.append(u"NT")
1945             else:
1946                 row.append(round(float(itm[u'mean']) / 1e6, 3))
1947                 row.append(round(float(itm[u'stdev']) / 1e6, 3))
1948         for rca in rcas:
1949             if rca is None:
1950                 continue
1951             rca_nr = rca[u"data"].get(row[0], u"-")
1952             row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1953         tbl_for_csv.append(row)
1954
1955     header_csv = [u"Test Case", ]
1956     for col in cols:
1957         header_csv.append(f"Avg({col[u'title']})")
1958         header_csv.append(f"Stdev({col[u'title']})")
1959     for comp in comparisons:
1960         header_csv.append(
1961             f"Avg({comp.get(u'title', u'')})"
1962         )
1963         header_csv.append(
1964             f"Stdev({comp.get(u'title', u'')})"
1965         )
1966     for rca in rcas:
1967         if rca:
1968             header_csv.append(rca[u"title"])
1969
1970     legend_lst = table.get(u"legend", None)
1971     if legend_lst is None:
1972         legend = u""
1973     else:
1974         legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1975
1976     footnote = u""
1977     if rcas and any(rcas):
1978         footnote += u"\nRoot Cause Analysis:\n"
1979         for rca in rcas:
1980             if rca:
1981                 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
1982
1983     csv_file_name = f"{table[u'output-file']}-csv.csv"
1984     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1985         file_handler.write(
1986             u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1987         )
1988         for test in tbl_for_csv:
1989             file_handler.write(
1990                 u",".join([f'"{item}"' for item in test]) + u"\n"
1991             )
1992         if legend_lst:
1993             for item in legend_lst:
1994                 file_handler.write(f'"{item}"\n')
1995         if footnote:
1996             for itm in footnote.split(u"\n"):
1997                 file_handler.write(f'"{itm}"\n')
1998
1999     tbl_tmp = list()
2000     max_lens = [0, ] * len(tbl_cmp_lst[0])
2001     for line in tbl_cmp_lst:
2002         row = [line[0], ]
2003         for idx, itm in enumerate(line[1:]):
2004             if itm is None or not isinstance(itm, dict) or \
2005                     itm.get(u'mean', None) is None or \
2006                     itm.get(u'stdev', None) is None:
2007                 new_itm = u"NT"
2008             else:
2009                 if idx < len(cols):
2010                     new_itm = (
2011                         f"{round(float(itm[u'mean']) / 1e6, 2)} "
2012                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
2013                         replace(u"nan", u"NaN")
2014                     )
2015                 else:
2016                     new_itm = (
2017                         f"{round(float(itm[u'mean']) / 1e6, 2):+} "
2018                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
2019                         replace(u"nan", u"NaN")
2020                     )
2021             if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
2022                 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
2023             row.append(new_itm)
2024
2025         tbl_tmp.append(row)
2026
2027     header = [u"Test Case", ]
2028     header.extend([col[u"title"] for col in cols])
2029     header.extend([comp.get(u"title", u"") for comp in comparisons])
2030
2031     tbl_final = list()
2032     for line in tbl_tmp:
2033         row = [line[0], ]
2034         for idx, itm in enumerate(line[1:]):
2035             if itm in (u"NT", u"NaN"):
2036                 row.append(itm)
2037                 continue
2038             itm_lst = itm.rsplit(u"\u00B1", 1)
2039             itm_lst[-1] = \
2040                 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
2041             itm_str = u"\u00B1".join(itm_lst)
2042
2043             if idx >= len(cols):
2044                 # Diffs
2045                 rca = rcas[idx - len(cols)]
2046                 if rca:
2047                     # Add rcas to diffs
2048                     rca_nr = rca[u"data"].get(row[0], None)
2049                     if rca_nr:
2050                         hdr_len = len(header[idx + 1]) - 1
2051                         if hdr_len < 19:
2052                             hdr_len = 19
2053                         rca_nr = f"[{rca_nr}]"
2054                         itm_str = (
2055                             f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
2056                             f"{u' ' * (hdr_len - 4 - len(itm_str))}"
2057                             f"{itm_str}"
2058                         )
2059             row.append(itm_str)
2060         tbl_final.append(row)
2061
2062     # Generate csv tables:
2063     csv_file_name = f"{table[u'output-file']}.csv"
2064     logging.info(f"    Writing the file {csv_file_name}")
2065     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2066         file_handler.write(u";".join(header) + u"\n")
2067         for test in tbl_final:
2068             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2069
2070     # Generate txt table:
2071     txt_file_name = f"{table[u'output-file']}.txt"
2072     logging.info(f"    Writing the file {txt_file_name}")
2073     convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
2074
2075     with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
2076         file_handler.write(legend)
2077         file_handler.write(footnote)
2078
2079     # Generate html table:
2080     _tpc_generate_html_table(
2081         header,
2082         tbl_final,
2083         table[u'output-file'],
2084         legend=legend,
2085         footnote=footnote,
2086         sort_data=False,
2087         title=table.get(u"title", u"")
2088     )
2089
2090
2091 def table_weekly_comparison(table, in_data):
2092     """Generate the table(s) with algorithm: table_weekly_comparison
2093     specified in the specification file.
2094
2095     :param table: Table to generate.
2096     :param in_data: Data to process.
2097     :type table: pandas.Series
2098     :type in_data: InputData
2099     """
2100     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2101
2102     # Transform the data
2103     logging.info(
2104         f"    Creating the data set for the {table.get(u'type', u'')} "
2105         f"{table.get(u'title', u'')}."
2106     )
2107
2108     incl_tests = table.get(u"include-tests", None)
2109     if incl_tests not in (u"NDR", u"PDR"):
2110         logging.error(f"Wrong tests to include specified ({incl_tests}).")
2111         return
2112
2113     nr_cols = table.get(u"nr-of-data-columns", None)
2114     if not nr_cols or nr_cols < 2:
2115         logging.error(
2116             f"No columns specified for {table.get(u'title', u'')}. Skipping."
2117         )
2118         return
2119
2120     data = in_data.filter_data(
2121         table,
2122         params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2123         continue_on_error=True
2124     )
2125
2126     header = [
2127         [u"VPP Version", ],
2128         [u"Start Timestamp", ],
2129         [u"CSIT Build", ],
2130         [u"CSIT Testbed", ]
2131     ]
2132     tbl_dict = dict()
2133     idx = 0
2134     tb_tbl = table.get(u"testbeds", None)
2135     for job_name, job_data in data.items():
2136         for build_nr, build in job_data.items():
2137             if idx >= nr_cols:
2138                 break
2139             if build.empty:
2140                 continue
2141
2142             tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2143             if tb_ip and tb_tbl:
2144                 testbed = tb_tbl.get(tb_ip, u"")
2145             else:
2146                 testbed = u""
2147             header[2].insert(1, build_nr)
2148             header[3].insert(1, testbed)
2149             header[1].insert(
2150                 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2151             )
2152             header[0].insert(
2153                 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2154             )
2155
2156             for tst_name, tst_data in build.items():
2157                 tst_name_mod = \
2158                     _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2159                 if not tbl_dict.get(tst_name_mod, None):
2160                     tbl_dict[tst_name_mod] = dict(
2161                         name=tst_data[u'name'].rsplit(u'-', 1)[0],
2162                     )
2163                 try:
2164                     tbl_dict[tst_name_mod][-idx - 1] = \
2165                         tst_data[u"throughput"][incl_tests][u"LOWER"]
2166                 except (TypeError, IndexError, KeyError, ValueError):
2167                     pass
2168             idx += 1
2169
2170     if idx < nr_cols:
2171         logging.error(u"Not enough data to build the table! Skipping")
2172         return
2173
2174     cmp_dict = dict()
2175     for idx, cmp in enumerate(table.get(u"comparisons", list())):
2176         idx_ref = cmp.get(u"reference", None)
2177         idx_cmp = cmp.get(u"compare", None)
2178         if idx_ref is None or idx_cmp is None:
2179             continue
2180         header[0].append(
2181             f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2182             f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2183         )
2184         header[1].append(u"")
2185         header[2].append(u"")
2186         header[3].append(u"")
2187         for tst_name, tst_data in tbl_dict.items():
2188             if not cmp_dict.get(tst_name, None):
2189                 cmp_dict[tst_name] = list()
2190             ref_data = tst_data.get(idx_ref, None)
2191             cmp_data = tst_data.get(idx_cmp, None)
2192             if ref_data is None or cmp_data is None:
2193                 cmp_dict[tst_name].append(float(u'nan'))
2194             else:
2195                 cmp_dict[tst_name].append(
2196                     relative_change(ref_data, cmp_data)
2197                 )
2198
2199     tbl_lst_none = list()
2200     tbl_lst = list()
2201     for tst_name, tst_data in tbl_dict.items():
2202         itm_lst = [tst_data[u"name"], ]
2203         for idx in range(nr_cols):
2204             item = tst_data.get(-idx - 1, None)
2205             if item is None:
2206                 itm_lst.insert(1, None)
2207             else:
2208                 itm_lst.insert(1, round(item / 1e6, 1))
2209         itm_lst.extend(
2210             [
2211                 None if itm is None else round(itm, 1)
2212                 for itm in cmp_dict[tst_name]
2213             ]
2214         )
2215         if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2216             tbl_lst_none.append(itm_lst)
2217         else:
2218             tbl_lst.append(itm_lst)
2219
2220     tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2221     tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2222     tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2223     tbl_lst.extend(tbl_lst_none)
2224
2225     # Generate csv table:
2226     csv_file_name = f"{table[u'output-file']}.csv"
2227     logging.info(f"    Writing the file {csv_file_name}")
2228     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2229         for hdr in header:
2230             file_handler.write(u",".join(hdr) + u"\n")
2231         for test in tbl_lst:
2232             file_handler.write(u",".join(
2233                 [
2234                     str(item).replace(u"None", u"-").replace(u"nan", u"-").
2235                     replace(u"null", u"-") for item in test
2236                 ]
2237             ) + u"\n")
2238
2239     txt_file_name = f"{table[u'output-file']}.txt"
2240     logging.info(f"    Writing the file {txt_file_name}")
2241     convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2242
2243     # Reorganize header in txt table
2244     txt_table = list()
2245     with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2246         for line in list(file_handler):
2247             txt_table.append(line)
2248     try:
2249         txt_table.insert(5, txt_table.pop(2))
2250         with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2251             file_handler.writelines(txt_table)
2252     except IndexError:
2253         pass
2254
2255     # Generate html table:
2256     hdr_html = [
2257         u"<br>".join(row) for row in zip(*header)
2258     ]
2259     _tpc_generate_html_table(
2260         hdr_html,
2261         tbl_lst,
2262         table[u'output-file'],
2263         sort_data=True,
2264         title=table.get(u"title", u""),
2265         generate_rst=False
2266     )