0b063b10670aca8fd8356b7d5d7fea011d124e9c
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import math
21 import re
22
23 from collections import OrderedDict
24 from xml.etree import ElementTree as ET
25 from datetime import datetime as dt
26 from datetime import timedelta
27 from copy import deepcopy
28
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
31 import pandas as pd
32 import prettytable
33
34 from numpy import nan, isnan
35 from yaml import load, FullLoader, YAMLError
36
37 from pal_utils import mean, stdev, classify_anomalies, \
38     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
39

# Matches a NIC token embedded in a test/suite name, e.g. u"10ge2p1x520"
# (digits, u"ge", port digit, u"p", digit, vendor letters, optional digits).
# Used both to strip the NIC from test names and to extract it from parents.
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
42
43
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table in the specification names an algorithm; the matching
    generator function is looked up in the dispatch dict below and called
    with the table specification and the input data.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name -> generator function.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison,
        u"table_job_spec_duration": table_job_spec_duration
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            if table[u"algorithm"] == u"table_weekly_comparison":
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            # An unknown algorithm name raises KeyError on the dict lookup;
            # NameError is kept for backward compatibility with the original
            # error handling.
            generator[table[u"algorithm"]](table, data)
        except (NameError, KeyError) as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
79
80
def table_job_spec_duration(table, input_data):
    """Generate the table(s) with algorithm: table_job_spec_duration
    specified in the specification file.

    For each job specification the table lists the number of runs and the
    duration as "HH:MM" (mean +- stdev for iterative jobs, a single value
    for coverage jobs). The result is written as pretty-printed text.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    jb_type = table.get(u"jb-type", None)

    tbl_lst = list()
    if jb_type == u"iterative":
        # Iterative job specs: collect the duration of every listed build
        # and compute mean / stdev over them.
        for line in table.get(u"lines", tuple()):
            tbl_itm = {
                u"name": line.get(u"job-spec", u""),
                u"data": list()
            }
            for job, builds in line.get(u"data-set", dict()).items():
                for build_nr in builds:
                    try:
                        # "elapsedtime" is in milliseconds; convert to
                        # whole minutes.
                        minutes = input_data.metadata(
                            job, str(build_nr)
                        )[u"elapsedtime"] // 60000
                    except (KeyError, IndexError, ValueError, AttributeError):
                        # Missing/malformed metadata: skip this build.
                        continue
                    tbl_itm[u"data"].append(minutes)
            tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
            tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
            tbl_lst.append(tbl_itm)
    elif jb_type == u"coverage":
        job = table.get(u"data", None)
        if not job:
            return
        for line in table.get(u"lines", tuple()):
            try:
                tbl_itm = {
                    u"name": line.get(u"job-spec", u""),
                    u"mean": input_data.metadata(
                        list(job.keys())[0], str(line[u"build"])
                    )[u"elapsedtime"] // 60000,
                    # Single build -> no spread; NaN is rendered as an
                    # empty stdev below.
                    u"stdev": float(u"nan")
                }
                tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
            except (KeyError, IndexError, ValueError, AttributeError):
                continue
            tbl_lst.append(tbl_itm)
    else:
        logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
        return

    # Convert the numeric minute values to "HH:MM" strings.
    for line in tbl_lst:
        line[u"mean"] = \
            f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
        if math.isnan(line[u"stdev"]):
            line[u"stdev"] = u""
        else:
            line[u"stdev"] = \
                f"{int(line[u'stdev'] // 60):02d}:{int(line[u'stdev'] % 60):02d}"

    if not tbl_lst:
        return

    rows = list()
    for itm in tbl_lst:
        rows.append([
            itm[u"name"],
            f"{len(itm[u'data'])}",
            f"{itm[u'mean']} +- {itm[u'stdev']}"
            if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
        ])

    txt_table = prettytable.PrettyTable(
        [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
    )
    for row in rows:
        txt_table.add_row(row)
    txt_table.align = u"r"
    txt_table.align[u"Job Specification"] = u"l"

    file_name = f"{table.get(u'output-file', u'')}.txt"
    with open(file_name, u"wt", encoding='utf-8') as txt_file:
        txt_file.write(str(txt_table))
169
170
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    For every suite, an HTML table with VPP operational (show-runtime
    telemetry) data is generated for each of its tests and written into a
    raw-HTML ``.rst`` file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"telemetry-show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")  # alternating row colors
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Table caption: the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # No telemetry at all (or only an error string) -> "No Data" table.
        if tst_data.get(u"telemetry-show-run", None) is None or \
                isinstance(tst_data[u"telemetry-show-run"], str):
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            # Invisible dot used as a vertical spacer below the table.
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        for dut_data in tst_data[u"telemetry-show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"runtime", None) is None:
                tcol.text = u"No Data"
                continue

            # Re-group flat telemetry samples into
            # runtime[thread_id][graph_node][counter_name] = value.
            runtime = dict()
            for item in dut_data[u"runtime"].get(u"data", tuple()):
                tid = int(item[u"labels"][u"thread_id"])
                if runtime.get(tid, None) is None:
                    runtime[tid] = dict()
                gnode = item[u"labels"][u"graph_node"]
                if runtime[tid].get(gnode, None) is None:
                    runtime[tid][gnode] = dict()
                try:
                    runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
                except ValueError:
                    # Non-numeric counter values are kept as strings.
                    runtime[tid][gnode][item[u"name"]] = item[u"value"]

            # Per-thread rows; derive clocks-per-packet and avg vector size.
            threads = dict({idx: list() for idx in range(len(runtime))})
            for idx, run_data in runtime.items():
                for gnode, gdata in run_data.items():
                    if gdata[u"vectors"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"vectors"]
                    elif gdata[u"calls"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"calls"]
                    elif gdata[u"suspends"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"suspends"]
                    else:
                        clocks = 0.0
                    if gdata[u"calls"] > 0:
                        vectors_call = gdata[u"vectors"] / gdata[u"calls"]
                    else:
                        vectors_call = 0.0
                    # Only show graph nodes which did some work.
                    if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
                            int(gdata[u"suspends"]):
                        threads[idx].append([
                            gnode,
                            int(gdata[u"calls"]),
                            int(gdata[u"vectors"]),
                            int(gdata[u"suspends"]),
                            clocks,
                            vectors_call
                        ])

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in threads.items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                # Thread 0 is the VPP main thread, the rest are workers.
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            # Write as UTF-8 explicitly so the output does not depend on the
            # current locale (consistent with the other writers in this file).
            with open(f"{file_name}", u'w', encoding=u'utf-8') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
413
414
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    One CSV file with the configured columns is written per suite, covering
    all PASSed tests belonging to that suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; embedded quotes are CSV-escaped.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"status"] != u"PASS" or \
                    data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column[u"data"] is e.g. u"data name"; the second word
                    # selects the field of the test record.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    # (skipping the column makes the row too short, so the
                    # length check below drops the whole row).
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Break long test names onto two lines.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u"\n", 1)[1]
                            except IndexError:
                                pass
                        col_data = col_data.replace(u'\n', u' |br| ').\
                            replace(u'\r', u'').replace(u'"', u"'")
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
                        col_data = col_data.replace(u'\n', u' |br| ')
                        # [:-5] trims the trailing separator remnant.
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # Keep only complete rows.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            # Explicit UTF-8 keeps the CSV output locale-independent
            # (consistent with the other writers in this file).
            with open(file_name, u"wt", encoding=u"utf-8") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
511
512
513 def _tpc_modify_test_name(test_name, ignore_nic=False):
514     """Modify a test name by replacing its parts.
515
516     :param test_name: Test name to be modified.
517     :param ignore_nic: If True, NIC is removed from TC name.
518     :type test_name: str
519     :type ignore_nic: bool
520     :returns: Modified test name.
521     :rtype: str
522     """
523     test_name_mod = test_name.\
524         replace(u"-ndrpdr", u"").\
525         replace(u"1t1c", u"1c").\
526         replace(u"2t1c", u"1c"). \
527         replace(u"2t2c", u"2c").\
528         replace(u"4t2c", u"2c"). \
529         replace(u"4t4c", u"4c").\
530         replace(u"8t4c", u"4c")
531
532     if ignore_nic:
533         return re.sub(REGEX_NIC, u"", test_name_mod)
534     return test_name_mod
535
536
537 def _tpc_modify_displayed_test_name(test_name):
538     """Modify a test name which is displayed in a table by replacing its parts.
539
540     :param test_name: Test name to be modified.
541     :type test_name: str
542     :returns: Modified test name.
543     :rtype: str
544     """
545     return test_name.\
546         replace(u"1t1c", u"1c").\
547         replace(u"2t1c", u"1c"). \
548         replace(u"2t2c", u"2c").\
549         replace(u"4t2c", u"2c"). \
550         replace(u"4t4c", u"4c").\
551         replace(u"8t4c", u"4c")
552
553
554 def _tpc_insert_data(target, src, include_tests):
555     """Insert src data to the target structure.
556
557     :param target: Target structure where the data is placed.
558     :param src: Source data to be placed into the target structure.
559     :param include_tests: Which results will be included (MRR, NDR, PDR).
560     :type target: list
561     :type src: dict
562     :type include_tests: str
563     """
564     try:
565         if include_tests == u"MRR":
566             target[u"mean"] = src[u"result"][u"receive-rate"]
567             target[u"stdev"] = src[u"result"][u"receive-stdev"]
568         elif include_tests == u"PDR":
569             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
570         elif include_tests == u"NDR":
571             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
572         elif u"latency" in include_tests:
573             keys = include_tests.split(u"-")
574             if len(keys) == 4:
575                 lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
576                 target[u"data"].append(
577                     float(u"nan") if lat == -1 else lat * 1e6
578                 )
579     except (KeyError, TypeError):
580         pass
581
582
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    # idx is the position of the u"Test Case" column; it also selects the
    # alignment/width presets below (0, 1 or 2 leading label columns).
    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        # One pre-sorted copy per header column, ascending and descending;
        # the dropdown menu below toggles which trace is visible.
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    # Alternating row background colors.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        # Build the sort-order dropdown: one button per (column, direction),
        # each making exactly one table trace visible.
        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
    # Explicit UTF-8 keeps the rst output locale-independent (consistent
    # with the other writers in this file).
    with open(f"{path}{file_name}.rst", u"wt", encoding=u"utf-8") as rst_file:
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )

        if legend:
            try:
                itm_lst = legend[1:-2].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
        if footnote:
            try:
                itm_lst = footnote[1:].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")
769
770
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    The table compares the mean rate of SOAK tests (the "compare" data set)
    with the mean rate of the corresponding NDR / PDR / MRR tests (the
    "reference" data set) and reports the relative difference together with
    its standard deviation. Output is written as csv, pretty txt and html.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test Case",
            f"Avg({table[u'reference'][u'title']})",
            f"Stdev({table[u'reference'][u'title']})",
            f"Avg({table[u'compare'][u'title']})",
            # Fixed: the opening parenthesis after "Stdev" was missing,
            # producing a malformed column name "Stdev<title>)".
            f"Stdev({table[u'compare'][u'title']})",
            u"Diff",
            u"Stdev(Diff)"
        ]
        header_str = u";".join(header) + u"\n"
        legend = (
            u"\nLegend:\n"
            f"Avg({table[u'reference'][u'title']}): "
            f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
            f"from a series of runs of the listed tests.\n"
            f"Stdev({table[u'reference'][u'title']}): "
            f"Standard deviation value of {table[u'reference'][u'title']} "
            f"[Mpps] computed from a series of runs of the listed tests.\n"
            f"Avg({table[u'compare'][u'title']}): "
            f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
            f"a series of runs of the listed tests.\n"
            f"Stdev({table[u'compare'][u'title']}): "
            f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
            f"computed from a series of runs of the listed tests.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Stdev(Diff): "
            u"Standard deviation of percentage change calculated for mean "
            u"values."
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        # No throughput recorded for this run; skip it.
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        # MRR data is a (mean, stdev) pair.
                        result = (tst_data[u"result"][u"receive-rate"],
                                  tst_data[u"result"][u"receive-stdev"])
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # Only tests with both reference and compare data end up in the table.
        if data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            try:
                item.append(round(delta))
            except ValueError:
                # round() raises ValueError for NaN; keep the raw value.
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
            tbl_lst.append(item)

    # Sort the table according to the relative change
    # NOTE(review): rel[-1] is the Stdev(Diff) column, not Diff (rel[-2]);
    # confirm which sort key is intended before changing.
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    with open(csv_file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(
        csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
    )
    with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
        file_handler.write(legend)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_lst,
        table[u'output-file'],
        legend=legend,
        title=table.get(u"title", u"")
    )
949
950
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    For every test the trend (last average), number of runs at that trend,
    long-term trend change and the number of regressions / progressions
    within the evaluation window are written to a csv file and a pretty txt
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Number of runs [#]",
        u"Trend Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        if len(data_t) < 2:
            # At least two samples are needed to evaluate a trend.
            continue

        try:
            classification_lst, avgs, _ = classify_anomalies(data_t)
        except ValueError as err:
            # NOTE(review): this aborts the whole table on the first
            # unclassifiable test; "continue" would skip only this test.
            # Kept as-is to preserve the current behavior.
            logging.info(f"{err} Skipping")
            return

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # The long-trend slice is empty or all-NaN.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Count how many of the most recent runs share the last average
        # (i.e. how long the trend has stayed at its current value).
        # Fixed: removed a stray trailing semicolon.
        nr_of_last_avgs = 0
        for x in reversed(avgs):
            if x == last_avg:
                nr_of_last_avgs += 1
            else:
                break

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            # Skip tests with incomplete trend data. (A previous extra
            # check for both relative changes being NaN was subsumed by
            # this condition and has been removed; behavior is unchanged.)
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 nr_of_last_avgs,
                 rel_change_long,
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

    # Stable multi-pass sort: name, runs, trend change; tests with the most
    # progressions and finally the most regressions end up on top.
    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[2])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[5], reverse=True)
    tbl_lst.sort(key=lambda rel: rel[4], reverse=True)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1086
1087
1088 def _generate_url(testbed, test_name):
1089     """Generate URL to a trending plot from the name of the test case.
1090
1091     :param testbed: The testbed used for testing.
1092     :param test_name: The name of the test case.
1093     :type testbed: str
1094     :type test_name: str
1095     :returns: The URL to the plot with the trending data for the given test
1096         case.
1097     :rtype str
1098     """
1099
1100     if u"x520" in test_name:
1101         nic = u"x520"
1102     elif u"x710" in test_name:
1103         nic = u"x710"
1104     elif u"xl710" in test_name:
1105         nic = u"xl710"
1106     elif u"xxv710" in test_name:
1107         nic = u"xxv710"
1108     elif u"vic1227" in test_name:
1109         nic = u"vic1227"
1110     elif u"vic1385" in test_name:
1111         nic = u"vic1385"
1112     elif u"x553" in test_name:
1113         nic = u"x553"
1114     elif u"cx556" in test_name or u"cx556a" in test_name:
1115         nic = u"cx556a"
1116     elif u"ena" in test_name:
1117         nic = u"nitro50g"
1118     else:
1119         nic = u""
1120
1121     if u"64b" in test_name:
1122         frame_size = u"64b"
1123     elif u"78b" in test_name:
1124         frame_size = u"78b"
1125     elif u"imix" in test_name:
1126         frame_size = u"imix"
1127     elif u"9000b" in test_name:
1128         frame_size = u"9000b"
1129     elif u"1518b" in test_name:
1130         frame_size = u"1518b"
1131     elif u"114b" in test_name:
1132         frame_size = u"114b"
1133     else:
1134         frame_size = u""
1135
1136     if u"1t1c" in test_name or \
1137         (u"-1c-" in test_name and
1138          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1139         cores = u"1t1c"
1140     elif u"2t2c" in test_name or \
1141          (u"-2c-" in test_name and
1142           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1143         cores = u"2t2c"
1144     elif u"4t4c" in test_name or \
1145          (u"-4c-" in test_name and
1146           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1147         cores = u"4t4c"
1148     elif u"2t1c" in test_name or \
1149          (u"-1c-" in test_name and
1150           testbed in
1151           (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2", u"2n-aws", u"3n-aws")):
1152         cores = u"2t1c"
1153     elif u"4t2c" in test_name or \
1154          (u"-2c-" in test_name and
1155           testbed in
1156           (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2", u"2n-aws", u"3n-aws")):
1157         cores = u"4t2c"
1158     elif u"8t4c" in test_name or \
1159          (u"-4c-" in test_name and
1160           testbed in
1161           (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2", u"2n-aws", u"3n-aws")):
1162         cores = u"8t4c"
1163     else:
1164         cores = u""
1165
1166     if u"testpmd" in test_name:
1167         driver = u"testpmd"
1168     elif u"l3fwd" in test_name:
1169         driver = u"l3fwd"
1170     elif u"avf" in test_name:
1171         driver = u"avf"
1172     elif u"af-xdp" in test_name or u"af_xdp" in test_name:
1173         driver = u"af_xdp"
1174     elif u"rdma" in test_name:
1175         driver = u"rdma"
1176     elif u"dnv" in testbed or u"tsh" in testbed:
1177         driver = u"ixgbe"
1178     elif u"ena" in test_name:
1179         driver = u"ena"
1180     else:
1181         driver = u"dpdk"
1182
1183     if u"macip-iacl1s" in test_name:
1184         bsf = u"features-macip-iacl1"
1185     elif u"macip-iacl10s" in test_name:
1186         bsf = u"features-macip-iacl10"
1187     elif u"macip-iacl50s" in test_name:
1188         bsf = u"features-macip-iacl50"
1189     elif u"iacl1s" in test_name:
1190         bsf = u"features-iacl1"
1191     elif u"iacl10s" in test_name:
1192         bsf = u"features-iacl10"
1193     elif u"iacl50s" in test_name:
1194         bsf = u"features-iacl50"
1195     elif u"oacl1s" in test_name:
1196         bsf = u"features-oacl1"
1197     elif u"oacl10s" in test_name:
1198         bsf = u"features-oacl10"
1199     elif u"oacl50s" in test_name:
1200         bsf = u"features-oacl50"
1201     elif u"nat44det" in test_name:
1202         bsf = u"nat44det-bidir"
1203     elif u"nat44ed" in test_name and u"udir" in test_name:
1204         bsf = u"nat44ed-udir"
1205     elif u"-cps" in test_name and u"ethip4udp" in test_name:
1206         bsf = u"udp-cps"
1207     elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1208         bsf = u"tcp-cps"
1209     elif u"-pps" in test_name and u"ethip4udp" in test_name:
1210         bsf = u"udp-pps"
1211     elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1212         bsf = u"tcp-pps"
1213     elif u"-tput" in test_name and u"ethip4udp" in test_name:
1214         bsf = u"udp-tput"
1215     elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1216         bsf = u"tcp-tput"
1217     elif u"udpsrcscale" in test_name:
1218         bsf = u"features-udp"
1219     elif u"iacl" in test_name:
1220         bsf = u"features"
1221     elif u"policer" in test_name:
1222         bsf = u"features"
1223     elif u"adl" in test_name:
1224         bsf = u"features"
1225     elif u"cop" in test_name:
1226         bsf = u"features"
1227     elif u"nat" in test_name:
1228         bsf = u"features"
1229     elif u"macip" in test_name:
1230         bsf = u"features"
1231     elif u"scale" in test_name:
1232         bsf = u"scale"
1233     elif u"base" in test_name:
1234         bsf = u"base"
1235     else:
1236         bsf = u"base"
1237
1238     if u"114b" in test_name and u"vhost" in test_name:
1239         domain = u"vts"
1240     elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1241         domain = u"nat44"
1242         if u"nat44det" in test_name:
1243             domain += u"-det-bidir"
1244         else:
1245             domain += u"-ed"
1246         if u"udir" in test_name:
1247             domain += u"-unidir"
1248         elif u"-ethip4udp-" in test_name:
1249             domain += u"-udp"
1250         elif u"-ethip4tcp-" in test_name:
1251             domain += u"-tcp"
1252         if u"-cps" in test_name:
1253             domain += u"-cps"
1254         elif u"-pps" in test_name:
1255             domain += u"-pps"
1256         elif u"-tput" in test_name:
1257             domain += u"-tput"
1258     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1259         domain = u"dpdk"
1260     elif u"memif" in test_name:
1261         domain = u"container_memif"
1262     elif u"srv6" in test_name:
1263         domain = u"srv6"
1264     elif u"vhost" in test_name:
1265         domain = u"vhost"
1266         if u"vppl2xc" in test_name:
1267             driver += u"-vpp"
1268         else:
1269             driver += u"-testpmd"
1270         if u"lbvpplacp" in test_name:
1271             bsf += u"-link-bonding"
1272     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1273         domain = u"nf_service_density_vnfc"
1274     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1275         domain = u"nf_service_density_cnfc"
1276     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1277         domain = u"nf_service_density_cnfp"
1278     elif u"ipsec" in test_name:
1279         domain = u"ipsec"
1280         if u"sw" in test_name:
1281             bsf += u"-sw"
1282         elif u"hw" in test_name:
1283             bsf += u"-hw"
1284         elif u"spe" in test_name:
1285             bsf += u"-spe"
1286     elif u"ethip4vxlan" in test_name:
1287         domain = u"ip4_tunnels"
1288     elif u"ethip4udpgeneve" in test_name:
1289         domain = u"ip4_tunnels"
1290     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1291         domain = u"ip4"
1292     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1293         domain = u"ip6"
1294     elif u"l2xcbase" in test_name or \
1295             u"l2xcscale" in test_name or \
1296             u"l2bdbasemaclrn" in test_name or \
1297             u"l2bdscale" in test_name or \
1298             u"l2patch" in test_name:
1299         domain = u"l2"
1300     else:
1301         domain = u""
1302
1303     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1304     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1305
1306     return file_name + anchor_name
1307
1308
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads a dashboard csv file (presumably the one written by
    table_perf_trending_dash -- verify against the specification) and
    renders it as an HTML table embedded in a reStructuredText file.
    Test-case names in the first column are turned into links to the
    corresponding trending plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    # The input comes from the csv file, not from input_data.
    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # NDR/PDR links point to a different directory and carry a suffix
    # selecting the ndr/pdr variant of the plot anchor.
    if test_type in (u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = f"-{test_type.lower()}"
    else:
        lnk_dir = u"../trending/"
        lnk_sufix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except FileNotFoundError as err:
        logging.warning(f"{err}")
        return
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header: built from the first csv row; first column left-aligned,
    # the rest centered.
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: each anomaly class has two shades which alternate per row.
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for r_idx, row in enumerate(csv_lst[1:]):
        # A non-zero count in column 4 marks a regression, in column 5 a
        # progression; otherwise the row is colored as "normal".
        if int(row[4]):
            color = u"regression"
        elif int(row[5]):
            color = u"progression"
        else:
            color = u"normal"
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name: the first column becomes a link to the trending plot
            # unless "add-links" is disabled in the specification.
            if c_idx == 0 and table.get(u"add-links", True):
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), item)}"
                        f"{lnk_sufix}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        # The table is emitted as a raw-html directive inside an rst file.
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1428
1429
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For every listed build a summary (build id, version, pass/fail counts,
    duration) followed by one line per failed test is written to the output
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                metadata = input_data.metadata(job, build)
                version = metadata.get(u"version", u"")
                duration = metadata.get(u"elapsedtime", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return

            passed, failed = 0, 0
            failed_tests = list()
            # NOTE: ".values" (no call) -- data[job][build] is presumably a
            # pandas Series, whose .values attribute yields the test records.
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                found = re.search(REGEX_NIC, tst_data[u"parent"])
                if not found:
                    continue
                msg = tst_data[u'msg'].replace(u"\n", u"")
                # Anonymize IPv4 addresses in the failure message.
                msg = re.sub(
                    r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
                    'xxx.xxx.xxx.xxx', msg
                )
                # Drop any trailing teardown-failure details.
                msg = msg.split(u'Also teardown failed')[0]
                failed_tests.append(
                    f"{found.group(0)}-{tst_data[u'name']}###{msg}"
                )

            # Per-build summary first, then one line per failed test.
            tbl_list.extend((build, version, passed, failed, duration))
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(f"{test}\n" for test in tbl_list)
1497
1498
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Collects, per test case, the number of failures within the configured
    time window together with the time, VPP build and CSIT build of the
    most recent failure, and writes the result as csv and pretty txt.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    test_type = u"NDRPDR" if u"NDRPDR" in table.get(u"filter", list()) \
        else u"MRR"

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Only builds generated within the configured window (days) are counted.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    found = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not found:
                        continue
                    tbl_dict[tst_name] = {
                        u"name": f"{found.group(0)}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        # (time, vpp build, csit build) of the most recent failure.
        last_fail = (u"", u"", u"")
        for status, generated, version, build in tst_data[u"data"].values():
            if status == u"FAIL":
                fails_nr += 1
                last_fail = (generated, version, build)
        if not fails_nr:
            continue
        job_prefix = u"mrr-daily" if test_type == u"MRR" \
            else u"ndrpdr-weekly"
        tbl_lst.append([
            tst_data[u"name"],
            fails_nr,
            last_fail[0],
            last_fail[1],
            f"{job_prefix}-build-{last_fail[2]}"
        ])

    # Stable two-pass sort: most recent failures first, then grouped by
    # failure count in descending order (equivalent to the former explicit
    # regrouping by count).
    tbl_lst.sort(key=lambda row: row[2], reverse=True)
    tbl_lst.sort(key=lambda row: row[1], reverse=True)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join(str(item) for item in test) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1609
1610
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads a previously generated csv file with failed tests, renders it as
    an HTML table (with optional links to the trending pages) and writes it
    as an rST ``.. raw:: html`` block.

    :param table: Table to generate.
    :param input_data: Data to process (unused by this algorithm).
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # Link target directory and anchor suffix depend on the test type;
    # test_type is already validated, so "not MRR" means NDR/PDR/NDRPDR.
    if test_type == u"MRR":
        lnk_dir, lnk_sufix = u"../trending/", u""
    else:
        lnk_dir, lnk_sufix = u"../ndrpdr_trending/", u"-pdr"

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Root element of the generated HTML table.
    root = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row - first csv line; first column left-aligned, rest centered.
    hdr_row = ET.SubElement(root, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, title in enumerate(rows[0]):
        cell = ET.SubElement(
            hdr_row,
            u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        cell.text = title

    # Data rows with alternating background colors.
    row_colors = (u"#e9f1fb", u"#d4e4f7")
    add_links = table.get(u"add-links", True)
    for row_nr, data_row in enumerate(rows[1:]):
        tr_elem = ET.SubElement(
            root, u"tr", attrib=dict(bgcolor=row_colors[row_nr % 2])
        )
        for col_nr, value in enumerate(data_row):
            td_elem = ET.SubElement(
                tr_elem,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0 and add_links:
                # Test name becomes a link to its trending graph.
                anchor = ET.SubElement(
                    td_elem,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), value)}"
                        f"{lnk_sufix}"
                    )
                )
                anchor.text = value
            else:
                td_elem.text = value

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(root, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1708
1709
def table_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_comparison
    specified in the specification file.

    Collects mean/stdev of test results for each configured column
    (optionally overridden by a replacement data set), computes relative
    differences for the configured comparisons, annotates them with RCA
    (root cause analysis) data and writes the result as csv, txt and html.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    columns = table.get(u"columns", None)
    if not columns:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    # Collect data for each column of the table.
    cols = list()
    for idx, col in enumerate(columns):
        if col.get(u"data-set", None) is None:
            logging.warning(f"No data for column {col.get(u'title', u'')}")
            continue
        tag = col.get(u"tag", None)
        data = input_data.filter_data(
            table,
            params=[
                u"throughput",
                u"result",
                u"latency",
                u"name",
                u"parent",
                u"tags"
            ],
            data=col[u"data-set"],
            continue_on_error=True
        )
        col_data = {
            u"title": col.get(u"title", f"Column{idx}"),
            u"data": dict()
        }
        for builds in data.values:
            for build in builds:
                for tst_name, tst_data in build.items():
                    # Optional tag filter for this column.
                    if tag and tag not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = \
                        _tpc_modify_test_name(tst_name, ignore_nic=True).\
                        replace(u"2n1l-", u"")
                    if col_data[u"data"].get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        col_data[u"data"][tst_name_mod] = {
                            u"name": name,
                            u"replace": True,
                            u"data": list(),
                            u"mean": None,
                            u"stdev": None
                        }
                    _tpc_insert_data(
                        target=col_data[u"data"][tst_name_mod],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

        # Data from the replacement data set overrides data collected above
        # (the "replace" flag marks entries still holding original data).
        replacement = col.get(u"data-replacement", None)
        if replacement:
            rpl_data = input_data.filter_data(
                table,
                params=[
                    u"throughput",
                    u"result",
                    u"latency",
                    u"name",
                    u"parent",
                    u"tags"
                ],
                data=replacement,
                continue_on_error=True
            )
            for builds in rpl_data.values:
                for build in builds:
                    for tst_name, tst_data in build.items():
                        if tag and tag not in tst_data[u"tags"]:
                            continue
                        tst_name_mod = \
                            _tpc_modify_test_name(tst_name, ignore_nic=True).\
                            replace(u"2n1l-", u"")
                        if col_data[u"data"].get(tst_name_mod, None) is None:
                            name = tst_data[u'name'].rsplit(u'-', 1)[0]
                            if u"across testbeds" in table[u"title"].lower() \
                                    or u"across topologies" in \
                                    table[u"title"].lower():
                                name = _tpc_modify_displayed_test_name(name)
                            col_data[u"data"][tst_name_mod] = {
                                u"name": name,
                                u"replace": False,
                                u"data": list(),
                                u"mean": None,
                                u"stdev": None
                            }
                        if col_data[u"data"][tst_name_mod][u"replace"]:
                            # First replacement hit: drop the original data.
                            col_data[u"data"][tst_name_mod][u"replace"] = False
                            col_data[u"data"][tst_name_mod][u"data"] = list()
                        _tpc_insert_data(
                            target=col_data[u"data"][tst_name_mod],
                            src=tst_data,
                            include_tests=table[u"include-tests"]
                        )

        if table[u"include-tests"] in (u"NDR", u"PDR") or \
                u"latency" in table[u"include-tests"]:
            for tst_name, tst_data in col_data[u"data"].items():
                if tst_data[u"data"]:
                    tst_data[u"mean"] = mean(tst_data[u"data"])
                    tst_data[u"stdev"] = stdev(tst_data[u"data"])

        cols.append(col_data)

    # Pivot: test name -> {column title -> (mean, stdev)}.
    tbl_dict = dict()
    for col in cols:
        for tst_name, tst_data in col[u"data"].items():
            if tbl_dict.get(tst_name, None) is None:
                tbl_dict[tst_name] = {
                    "name": tst_data[u"name"]
                }
            tbl_dict[tst_name][col[u"title"]] = {
                u"mean": tst_data[u"mean"],
                u"stdev": tst_data[u"stdev"]
            }

    if not tbl_dict:
        logging.warning(f"No data for table {table.get(u'title', u'')}!")
        return

    tbl_lst = list()
    for tst_data in tbl_dict.values():
        row = [tst_data[u"name"], ]
        for col in cols:
            row.append(tst_data.get(col[u"title"], None))
        tbl_lst.append(row)

    # Validate the comparison definitions and load RCA files. The original
    # implementation popped invalid items from "comparisons" while
    # enumerating it, which skips the element following each removed one
    # and desynchronizes it from the parallel "rcas" list. Build a filtered
    # list instead.
    comparisons = table.get(u"comparisons", None)
    rcas = list()
    if comparisons and isinstance(comparisons, list):
        valid_comps = list()
        for idx, comp in enumerate(comparisons):
            try:
                col_ref = int(comp[u"reference"])
                col_cmp = int(comp[u"compare"])
            except KeyError:
                logging.warning(u"Comparison: No references defined! Skipping.")
                continue
            if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
                    col_ref == col_cmp):
                logging.warning(f"Wrong values of reference={col_ref} "
                                f"and/or compare={col_cmp}. Skipping.")
                continue
            valid_comps.append(comp)
            rca_file_name = comp.get(u"rca-file", None)
            if rca_file_name:
                try:
                    with open(rca_file_name, u"r") as file_handler:
                        rcas.append(
                            {
                                u"title": f"RCA{idx + 1}",
                                u"data": load(file_handler, Loader=FullLoader)
                            }
                        )
                except (YAMLError, IOError) as err:
                    logging.warning(
                        f"The RCA file {rca_file_name} does not exist or "
                        f"it is corrupted!"
                    )
                    logging.debug(repr(err))
                    rcas.append(None)
            else:
                rcas.append(None)
        comparisons = valid_comps
    else:
        # Use an empty list (not None) so the header-building loops below
        # can iterate it without a TypeError.
        comparisons = list()

    # Compute the relative differences. A row is kept only when all
    # comparisons could be computed for it (the for-else below).
    tbl_cmp_lst = list()
    if comparisons:
        for row in tbl_lst:
            new_row = deepcopy(row)
            for comp in comparisons:
                ref_itm = row[int(comp[u"reference"])]
                if ref_itm is None and \
                        comp.get(u"reference-alt", None) is not None:
                    ref_itm = row[int(comp[u"reference-alt"])]
                cmp_itm = row[int(comp[u"compare"])]
                if ref_itm is not None and cmp_itm is not None and \
                        ref_itm[u"mean"] is not None and \
                        cmp_itm[u"mean"] is not None and \
                        ref_itm[u"stdev"] is not None and \
                        cmp_itm[u"stdev"] is not None:
                    try:
                        delta, d_stdev = relative_change_stdev(
                            ref_itm[u"mean"], cmp_itm[u"mean"],
                            ref_itm[u"stdev"], cmp_itm[u"stdev"]
                        )
                    except ZeroDivisionError:
                        break
                    if delta is None or math.isnan(delta):
                        break
                    new_row.append({
                        u"mean": delta * 1e6,
                        u"stdev": d_stdev * 1e6
                    })
                else:
                    break
            else:
                tbl_cmp_lst.append(new_row)

    try:
        tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
        tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
    except TypeError as err:
        logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")

    # Detailed csv variant: Avg and Stdev in separate columns, RCA tags.
    tbl_for_csv = list()
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or\
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                row.append(u"NT")
                row.append(u"NT")
            else:
                row.append(round(float(itm[u'mean']) / 1e6, 3))
                row.append(round(float(itm[u'stdev']) / 1e6, 3))
        for rca in rcas:
            if rca is None:
                continue
            rca_nr = rca[u"data"].get(row[0], u"-")
            row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        tbl_for_csv.append(row)

    header_csv = [u"Test Case", ]
    for col in cols:
        header_csv.append(f"Avg({col[u'title']})")
        header_csv.append(f"Stdev({col[u'title']})")
    for comp in comparisons:
        header_csv.append(
            f"Avg({comp.get(u'title', u'')})"
        )
        header_csv.append(
            f"Stdev({comp.get(u'title', u'')})"
        )
    for rca in rcas:
        if rca:
            header_csv.append(rca[u"title"])

    legend_lst = table.get(u"legend", None)
    if legend_lst is None:
        legend = u""
    else:
        legend = u"\n" + u"\n".join(legend_lst) + u"\n"

    footnote = u""
    if rcas and any(rcas):
        footnote += u"\nRoot Cause Analysis:\n"
        for rca in rcas:
            if rca:
                footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"

    csv_file_name = f"{table[u'output-file']}-csv.csv"
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(
            u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
        )
        for test in tbl_for_csv:
            file_handler.write(
                u",".join([f'"{item}"' for item in test]) + u"\n"
            )
        if legend_lst:
            for item in legend_lst:
                file_handler.write(f'"{item}"\n')
        if footnote:
            for itm in footnote.split(u"\n"):
                file_handler.write(f'"{itm}"\n')

    # Compact variant: "mean ±stdev" strings, right-aligned per column.
    tbl_tmp = list()
    # Guard against an empty comparison table; the original indexed
    # tbl_cmp_lst[0] unconditionally and raised IndexError here.
    max_lens = [0, ] * len(tbl_cmp_lst[0]) if tbl_cmp_lst else list()
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or \
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                new_itm = u"NT"
            else:
                if idx < len(cols):
                    # Data column: plain value.
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 2)} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
                        replace(u"nan", u"NaN")
                    )
                else:
                    # Comparison column: signed value.
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 2):+} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
                        replace(u"nan", u"NaN")
                    )
            if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
                max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
            row.append(new_itm)

        tbl_tmp.append(row)

    header = [u"Test Case", ]
    header.extend([col[u"title"] for col in cols])
    header.extend([comp.get(u"title", u"") for comp in comparisons])

    # Pad the stdev part to the column-wide maximum and prepend RCA tags
    # to the comparison columns.
    tbl_final = list()
    for line in tbl_tmp:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm in (u"NT", u"NaN"):
                row.append(itm)
                continue
            itm_lst = itm.rsplit(u"\u00B1", 1)
            itm_lst[-1] = \
                f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
            itm_str = u"\u00B1".join(itm_lst)

            if idx >= len(cols):
                # Diffs
                rca = rcas[idx - len(cols)]
                if rca:
                    # Add rcas to diffs
                    rca_nr = rca[u"data"].get(row[0], None)
                    if rca_nr:
                        hdr_len = len(header[idx + 1]) - 1
                        if hdr_len < 19:
                            hdr_len = 19
                        rca_nr = f"[{rca_nr}]"
                        itm_str = (
                            f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
                            f"{u' ' * (hdr_len - 4 - len(itm_str))}"
                            f"{itm_str}"
                        )
            row.append(itm_str)
        tbl_final.append(row)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(u";".join(header) + u"\n")
        for test in tbl_final:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    # Generate txt table:
    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")

    with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
        file_handler.write(legend)
        file_handler.write(footnote)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_final,
        table[u'output-file'],
        legend=legend,
        footnote=footnote,
        sort_data=False,
        title=table.get(u"title", u"")
    )
2091
2092
def table_weekly_comparison(table, in_data):
    """Generate the table(s) with algorithm: table_weekly_comparison
    specified in the specification file.

    Builds a table with one data column per build (up to
    "nr-of-data-columns"), a four-row header (VPP version, timestamp, CSIT
    build, testbed) and optional relative-change columns defined by
    "comparisons". Writes csv, txt and html outputs.

    :param table: Table to generate.
    :param in_data: Data to process.
    :type table: pandas.Series
    :type in_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    # Only NDR or PDR lower bounds are supported by this algorithm.
    incl_tests = table.get(u"include-tests", None)
    if incl_tests not in (u"NDR", u"PDR"):
        logging.error(f"Wrong tests to include specified ({incl_tests}).")
        return

    nr_cols = table.get(u"nr-of-data-columns", None)
    if not nr_cols or nr_cols < 2:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    data = in_data.filter_data(
        table,
        params=[u"throughput", u"result", u"name", u"parent", u"tags"],
        continue_on_error=True
    )

    # Four header rows; per-build values are inserted at position 1 below,
    # so each newly processed build becomes the leftmost data column.
    header = [
        [u"VPP Version", ],
        [u"Start Timestamp", ],
        [u"CSIT Build", ],
        [u"CSIT Testbed", ]
    ]
    tbl_dict = dict()
    idx = 0
    tb_tbl = table.get(u"testbeds", None)
    for job_name, job_data in data.items():
        for build_nr, build in job_data.items():
            # Stop after collecting nr_cols data columns.
            if idx >= nr_cols:
                break
            if build.empty:
                continue

            # Map the testbed IP to its display name, if a mapping exists.
            tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            else:
                testbed = u""
            header[2].insert(1, build_nr)
            header[3].insert(1, testbed)
            header[1].insert(
                1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
            )
            header[0].insert(
                1, in_data.metadata(job_name, build_nr).get(u"version", u"")
            )

            for tst_name, tst_data in build.items():
                tst_name_mod = \
                    _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
                if not tbl_dict.get(tst_name_mod, None):
                    tbl_dict[tst_name_mod] = dict(
                        name=tst_data[u'name'].rsplit(u'-', 1)[0],
                    )
                # Per-build results are keyed by the negative column index
                # (-1 for the first processed build, -2 for the next, ...).
                try:
                    tbl_dict[tst_name_mod][-idx - 1] = \
                        tst_data[u"throughput"][incl_tests][u"LOWER"]
                except (TypeError, IndexError, KeyError, ValueError):
                    # Missing/odd result for this test in this build;
                    # the cell is left empty.
                    pass
            idx += 1

    if idx < nr_cols:
        logging.error(u"Not enough data to build the table! Skipping")
        return

    # Relative changes between configured pairs of builds.
    # NOTE(review): "reference"/"compare" are presumably the negative keys
    # used in tbl_dict above (-1, -2, ...) - verify against the
    # specification files.
    cmp_dict = dict()
    for idx, cmp in enumerate(table.get(u"comparisons", list())):
        idx_ref = cmp.get(u"reference", None)
        idx_cmp = cmp.get(u"compare", None)
        if idx_ref is None or idx_cmp is None:
            continue
        # NOTE(review): the "- idx" shift compensates for the Diff()
        # entries appended to header[0] by previous iterations of this
        # loop - confirm the intended indexing for more than one
        # comparison.
        header[0].append(
            f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
            f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
        )
        header[1].append(u"")
        header[2].append(u"")
        header[3].append(u"")
        for tst_name, tst_data in tbl_dict.items():
            if not cmp_dict.get(tst_name, None):
                cmp_dict[tst_name] = list()
            ref_data = tst_data.get(idx_ref, None)
            cmp_data = tst_data.get(idx_cmp, None)
            if ref_data is None or cmp_data is None:
                # Missing data on either side -> NaN placeholder so the
                # row is still emitted (sorted to the bottom later).
                cmp_dict[tst_name].append(float(u'nan'))
            else:
                cmp_dict[tst_name].append(
                    relative_change(ref_data, cmp_data)
                )

    # Rows whose last comparison is NaN/None go to a separate list and are
    # appended after the sorted regular rows.
    tbl_lst_none = list()
    tbl_lst = list()
    for tst_name, tst_data in tbl_dict.items():
        itm_lst = [tst_data[u"name"], ]
        for idx in range(nr_cols):
            item = tst_data.get(-idx - 1, None)
            # insert(1, ...) reverses the fetch order, so the column order
            # in the row matches the header built above. Values are scaled
            # to Mpps (presumably - the raw value looks like pps; verify).
            if item is None:
                itm_lst.insert(1, None)
            else:
                itm_lst.insert(1, round(item / 1e6, 1))
        itm_lst.extend(
            [
                None if itm is None else round(itm, 1)
                for itm in cmp_dict[tst_name]
            ]
        )
        if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
            tbl_lst_none.append(itm_lst)
        else:
            tbl_lst.append(itm_lst)

    # Stable sorts: by name first, then by the last comparison value.
    tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_lst.extend(tbl_lst_none)

    # Generate csv table:
    # NOTE(review): u",".join(hdr) assumes all header entries are strings
    # (build_nr, metadata values) - confirm no ints reach this point.
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        for hdr in header:
            file_handler.write(u",".join(hdr) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join(
                [
                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
                    replace(u"null", u"-") for item in test
                ]
            ) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")

    # Reorganize header in txt table
    txt_table = list()
    with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
        for line in list(file_handler):
            txt_table.append(line)
    # Move the "CSIT Build" line below the separator drawn by the pretty
    # printer; skip silently if the table is too short.
    try:
        txt_table.insert(5, txt_table.pop(2))
        with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
            file_handler.writelines(txt_table)
    except IndexError:
        pass

    # Generate html table:
    # The four header rows are merged into one html header cell per column.
    hdr_html = [
        u"<br>".join(row) for row in zip(*header)
    ]
    _tpc_generate_html_table(
        hdr_html,
        tbl_lst,
        table[u'output-file'],
        sort_data=True,
        title=table.get(u"title", u""),
        generate_rst=False
    )