a995711bcb54c08fa2cfb10770cf006f71ce80b7
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import math
21 import re
22
23 from collections import OrderedDict
24 from xml.etree import ElementTree as ET
25 from datetime import datetime as dt
26 from datetime import timedelta
27 from copy import deepcopy
28
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
31 import pandas as pd
32 import prettytable
33
34 from numpy import nan, isnan
35 from yaml import load, FullLoader, YAMLError
36
37 from pal_utils import mean, stdev, classify_anomalies, \
38     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
39
40
41 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
42
43
44 def generate_tables(spec, data):
45     """Generate all tables specified in the specification file.
46
47     :param spec: Specification read from the specification file.
48     :param data: Data to process.
49     :type spec: Specification
50     :type data: InputData
51     """
52
53     generator = {
54         u"table_merged_details": table_merged_details,
55         u"table_soak_vs_ndr": table_soak_vs_ndr,
56         u"table_perf_trending_dash": table_perf_trending_dash,
57         u"table_perf_trending_dash_html": table_perf_trending_dash_html,
58         u"table_last_failed_tests": table_last_failed_tests,
59         u"table_failed_tests": table_failed_tests,
60         u"table_failed_tests_html": table_failed_tests_html,
61         u"table_oper_data_html": table_oper_data_html,
62         u"table_comparison": table_comparison,
63         u"table_weekly_comparison": table_weekly_comparison,
64         u"table_job_spec_duration": table_job_spec_duration
65     }
66
67     logging.info(u"Generating the tables ...")
68     for table in spec.tables:
69         try:
70             if table[u"algorithm"] == u"table_weekly_comparison":
71                 table[u"testbeds"] = spec.environment.get(u"testbeds", None)
72             generator[table[u"algorithm"]](table, data)
73         except NameError as err:
74             logging.error(
75                 f"Probably algorithm {table[u'algorithm']} is not defined: "
76                 f"{repr(err)}"
77             )
78     logging.info(u"Done.")
79
80
81 def table_job_spec_duration(table, input_data):
82     """Generate the table(s) with algorithm: table_job_spec_duration
83     specified in the specification file.
84
85     :param table: Table to generate.
86     :param input_data: Data to process.
87     :type table: pandas.Series
88     :type input_data: InputData
89     """
90
91     _ = input_data
92
93     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
94
95     jb_type = table.get(u"jb-type", None)
96
97     tbl_lst = list()
98     if jb_type == u"iterative":
99         for line in table.get(u"lines", tuple()):
100             tbl_itm = {
101                 u"name": line.get(u"job-spec", u""),
102                 u"data": list()
103             }
104             for job, builds in line.get(u"data-set", dict()).items():
105                 for build_nr in builds:
106                     try:
107                         minutes = input_data.metadata(
108                             job, str(build_nr)
109                         )[u"elapsedtime"] // 60000
110                     except (KeyError, IndexError, ValueError, AttributeError):
111                         continue
112                     tbl_itm[u"data"].append(minutes)
113             tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
114             tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
115             tbl_lst.append(tbl_itm)
116     elif jb_type == u"coverage":
117         job = table.get(u"data", None)
118         if not job:
119             return
120         for line in table.get(u"lines", tuple()):
121             try:
122                 tbl_itm = {
123                     u"name": line.get(u"job-spec", u""),
124                     u"mean": input_data.metadata(
125                         list(job.keys())[0], str(line[u"build"])
126                     )[u"elapsedtime"] // 60000,
127                     u"stdev": float(u"nan")
128                 }
129                 tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
130             except (KeyError, IndexError, ValueError, AttributeError):
131                 continue
132             tbl_lst.append(tbl_itm)
133     else:
134         logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
135         return
136
137     for line in tbl_lst:
138         line[u"mean"] = \
139             f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
140         if math.isnan(line[u"stdev"]):
141             line[u"stdev"] = u""
142         else:
143             line[u"stdev"] = \
144                 f"{int(line[u'stdev'] //60):02d}:{int(line[u'stdev'] % 60):02d}"
145
146     if not tbl_lst:
147         return
148
149     rows = list()
150     for itm in tbl_lst:
151         rows.append([
152             itm[u"name"],
153             f"{len(itm[u'data'])}",
154             f"{itm[u'mean']} +- {itm[u'stdev']}"
155             if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
156         ])
157
158     txt_table = prettytable.PrettyTable(
159         [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
160     )
161     for row in rows:
162         txt_table.add_row(row)
163     txt_table.align = u"r"
164     txt_table.align[u"Job Specification"] = u"l"
165
166     file_name = f"{table.get(u'output-file', u'')}.txt"
167     with open(file_name, u"wt", encoding='utf-8') as txt_file:
168         txt_file.write(str(txt_table))
169
170
171 def table_oper_data_html(table, input_data):
172     """Generate the table(s) with algorithm: html_table_oper_data
173     specified in the specification file.
174
175     :param table: Table to generate.
176     :param input_data: Data to process.
177     :type table: pandas.Series
178     :type input_data: InputData
179     """
180
181     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
182     # Transform the data
183     logging.info(
184         f"    Creating the data set for the {table.get(u'type', u'')} "
185         f"{table.get(u'title', u'')}."
186     )
187     data = input_data.filter_data(
188         table,
189         params=[u"name", u"parent", u"telemetry-show-run", u"type"],
190         continue_on_error=True
191     )
192     if data.empty:
193         return
194     data = input_data.merge_data(data)
195
196     sort_tests = table.get(u"sort", None)
197     if sort_tests:
198         args = dict(
199             inplace=True,
200             ascending=(sort_tests == u"ascending")
201         )
202         data.sort_index(**args)
203
204     suites = input_data.filter_data(
205         table,
206         continue_on_error=True,
207         data_set=u"suites"
208     )
209     if suites.empty:
210         return
211     suites = input_data.merge_data(suites)
212
213     def _generate_html_table(tst_data):
214         """Generate an HTML table with operational data for the given test.
215
216         :param tst_data: Test data to be used to generate the table.
217         :type tst_data: pandas.Series
218         :returns: HTML table with operational data.
219         :rtype: str
220         """
221
222         colors = {
223             u"header": u"#7eade7",
224             u"empty": u"#ffffff",
225             u"body": (u"#e9f1fb", u"#d4e4f7")
226         }
227
228         tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
229
230         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
231         thead = ET.SubElement(
232             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
233         )
234         thead.text = tst_data[u"name"]
235
236         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
237         thead = ET.SubElement(
238             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
239         )
240         thead.text = u"\t"
241
242         if tst_data.get(u"telemetry-show-run", None) is None or \
243                 isinstance(tst_data[u"telemetry-show-run"], str):
244             trow = ET.SubElement(
245                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
246             )
247             tcol = ET.SubElement(
248                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
249             )
250             tcol.text = u"No Data"
251
252             trow = ET.SubElement(
253                 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
254             )
255             thead = ET.SubElement(
256                 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
257             )
258             font = ET.SubElement(
259                 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
260             )
261             font.text = u"."
262             return str(ET.tostring(tbl, encoding=u"unicode"))
263
264         tbl_hdr = (
265             u"Name",
266             u"Nr of Vectors",
267             u"Nr of Packets",
268             u"Suspends",
269             u"Cycles per Packet",
270             u"Average Vector Size"
271         )
272
273         for dut_data in tst_data[u"telemetry-show-run"].values():
274             trow = ET.SubElement(
275                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
276             )
277             tcol = ET.SubElement(
278                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
279             )
280             if dut_data.get(u"runtime", None) is None:
281                 tcol.text = u"No Data"
282                 continue
283
284             runtime = dict()
285             for item in dut_data[u"runtime"].get(u"data", tuple()):
286                 tid = int(item[u"labels"][u"thread_id"])
287                 if runtime.get(tid, None) is None:
288                     runtime[tid] = dict()
289                 gnode = item[u"labels"][u"graph_node"]
290                 if runtime[tid].get(gnode, None) is None:
291                     runtime[tid][gnode] = dict()
292                 try:
293                     runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
294                 except ValueError:
295                     runtime[tid][gnode][item[u"name"]] = item[u"value"]
296
297             threads = dict({idx: list() for idx in range(len(runtime))})
298             for idx, run_data in runtime.items():
299                 for gnode, gdata in run_data.items():
300                     if gdata[u"vectors"] > 0:
301                         clocks = gdata[u"clocks"] / gdata[u"vectors"]
302                     elif gdata[u"calls"] > 0:
303                         clocks = gdata[u"clocks"] / gdata[u"calls"]
304                     elif gdata[u"suspends"] > 0:
305                         clocks = gdata[u"clocks"] / gdata[u"suspends"]
306                     else:
307                         clocks = 0.0
308                     if gdata[u"calls"] > 0:
309                         vectors_call = gdata[u"vectors"] / gdata[u"calls"]
310                     else:
311                         vectors_call = 0.0
312                     if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
313                             int(gdata[u"suspends"]):
314                         threads[idx].append([
315                             gnode,
316                             int(gdata[u"calls"]),
317                             int(gdata[u"vectors"]),
318                             int(gdata[u"suspends"]),
319                             clocks,
320                             vectors_call
321                         ])
322
323             bold = ET.SubElement(tcol, u"b")
324             bold.text = (
325                 f"Host IP: {dut_data.get(u'host', '')}, "
326                 f"Socket: {dut_data.get(u'socket', '')}"
327             )
328             trow = ET.SubElement(
329                 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
330             )
331             thead = ET.SubElement(
332                 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
333             )
334             thead.text = u"\t"
335
336             for thread_nr, thread in threads.items():
337                 trow = ET.SubElement(
338                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
339                 )
340                 tcol = ET.SubElement(
341                     trow, u"td", attrib=dict(align=u"left", colspan=u"6")
342                 )
343                 bold = ET.SubElement(tcol, u"b")
344                 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
345                 trow = ET.SubElement(
346                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
347                 )
348                 for idx, col in enumerate(tbl_hdr):
349                     tcol = ET.SubElement(
350                         trow, u"td",
351                         attrib=dict(align=u"right" if idx else u"left")
352                     )
353                     font = ET.SubElement(
354                         tcol, u"font", attrib=dict(size=u"2")
355                     )
356                     bold = ET.SubElement(font, u"b")
357                     bold.text = col
358                 for row_nr, row in enumerate(thread):
359                     trow = ET.SubElement(
360                         tbl, u"tr",
361                         attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
362                     )
363                     for idx, col in enumerate(row):
364                         tcol = ET.SubElement(
365                             trow, u"td",
366                             attrib=dict(align=u"right" if idx else u"left")
367                         )
368                         font = ET.SubElement(
369                             tcol, u"font", attrib=dict(size=u"2")
370                         )
371                         if isinstance(col, float):
372                             font.text = f"{col:.2f}"
373                         else:
374                             font.text = str(col)
375                 trow = ET.SubElement(
376                     tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
377                 )
378                 thead = ET.SubElement(
379                     trow, u"th", attrib=dict(align=u"left", colspan=u"6")
380                 )
381                 thead.text = u"\t"
382
383         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
384         thead = ET.SubElement(
385             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
386         )
387         font = ET.SubElement(
388             thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
389         )
390         font.text = u"."
391
392         return str(ET.tostring(tbl, encoding=u"unicode"))
393
394     for suite in suites.values:
395         html_table = str()
396         for test_data in data.values:
397             if test_data[u"parent"] not in suite[u"name"]:
398                 continue
399             html_table += _generate_html_table(test_data)
400         if not html_table:
401             continue
402         try:
403             file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
404             with open(f"{file_name}", u'w') as html_file:
405                 logging.info(f"    Writing file: {file_name}")
406                 html_file.write(u".. raw:: html\n\n\t")
407                 html_file.write(html_table)
408                 html_file.write(u"\n\t<p><br><br></p>\n")
409         except KeyError:
410             logging.warning(u"The output file is not defined.")
411             return
412     logging.info(u"  Done.")
413
414
415 def table_merged_details(table, input_data):
416     """Generate the table(s) with algorithm: table_merged_details
417     specified in the specification file.
418
419     :param table: Table to generate.
420     :param input_data: Data to process.
421     :type table: pandas.Series
422     :type input_data: InputData
423     """
424
425     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
426
427     # Transform the data
428     logging.info(
429         f"    Creating the data set for the {table.get(u'type', u'')} "
430         f"{table.get(u'title', u'')}."
431     )
432     data = input_data.filter_data(table, continue_on_error=True)
433     data = input_data.merge_data(data)
434
435     sort_tests = table.get(u"sort", None)
436     if sort_tests:
437         args = dict(
438             inplace=True,
439             ascending=(sort_tests == u"ascending")
440         )
441         data.sort_index(**args)
442
443     suites = input_data.filter_data(
444         table, continue_on_error=True, data_set=u"suites")
445     suites = input_data.merge_data(suites)
446
447     # Prepare the header of the tables
448     header = list()
449     for column in table[u"columns"]:
450         header.append(
451             u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
452         )
453
454     for suite in suites.values:
455         # Generate data
456         suite_name = suite[u"name"]
457         table_lst = list()
458         for test in data.keys():
459             if data[test][u"status"] != u"PASS" or \
460                     data[test][u"parent"] not in suite_name:
461                 continue
462             row_lst = list()
463             for column in table[u"columns"]:
464                 try:
465                     col_data = str(data[test][column[
466                         u"data"].split(u" ")[1]]).replace(u'"', u'""')
467                     # Do not include tests with "Test Failed" in test message
468                     if u"Test Failed" in col_data:
469                         continue
470                     col_data = col_data.replace(
471                         u"No Data", u"Not Captured     "
472                     )
473                     if column[u"data"].split(u" ")[1] in (u"name", ):
474                         if len(col_data) > 30:
475                             col_data_lst = col_data.split(u"-")
476                             half = int(len(col_data_lst) / 2)
477                             col_data = f"{u'-'.join(col_data_lst[:half])}" \
478                                        f"- |br| " \
479                                        f"{u'-'.join(col_data_lst[half:])}"
480                         col_data = f" |prein| {col_data} |preout| "
481                     elif column[u"data"].split(u" ")[1] in (u"msg", ):
482                         # Temporary solution: remove NDR results from message:
483                         if bool(table.get(u'remove-ndr', False)):
484                             try:
485                                 col_data = col_data.split(u"\n", 1)[1]
486                             except IndexError:
487                                 pass
488                         col_data = col_data.replace(u'\n', u' |br| ').\
489                             replace(u'\r', u'').replace(u'"', u"'")
490                         col_data = f" |prein| {col_data} |preout| "
491                     elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
492                         col_data = col_data.replace(u'\n', u' |br| ')
493                         col_data = f" |prein| {col_data[:-5]} |preout| "
494                     row_lst.append(f'"{col_data}"')
495                 except KeyError:
496                     row_lst.append(u'"Not captured"')
497             if len(row_lst) == len(table[u"columns"]):
498                 table_lst.append(row_lst)
499
500         # Write the data to file
501         if table_lst:
502             separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
503             file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
504             logging.info(f"      Writing file: {file_name}")
505             with open(file_name, u"wt") as file_handler:
506                 file_handler.write(u",".join(header) + u"\n")
507                 for item in table_lst:
508                     file_handler.write(u",".join(item) + u"\n")
509
510     logging.info(u"  Done.")
511
512
513 def _tpc_modify_test_name(test_name, ignore_nic=False):
514     """Modify a test name by replacing its parts.
515
516     :param test_name: Test name to be modified.
517     :param ignore_nic: If True, NIC is removed from TC name.
518     :type test_name: str
519     :type ignore_nic: bool
520     :returns: Modified test name.
521     :rtype: str
522     """
523     test_name_mod = test_name.\
524         replace(u"-ndrpdr", u"").\
525         replace(u"1t1c", u"1c").\
526         replace(u"2t1c", u"1c"). \
527         replace(u"2t2c", u"2c").\
528         replace(u"4t2c", u"2c"). \
529         replace(u"4t4c", u"4c").\
530         replace(u"8t4c", u"4c")
531
532     if ignore_nic:
533         return re.sub(REGEX_NIC, u"", test_name_mod)
534     return test_name_mod
535
536
537 def _tpc_modify_displayed_test_name(test_name):
538     """Modify a test name which is displayed in a table by replacing its parts.
539
540     :param test_name: Test name to be modified.
541     :type test_name: str
542     :returns: Modified test name.
543     :rtype: str
544     """
545     return test_name.\
546         replace(u"1t1c", u"1c").\
547         replace(u"2t1c", u"1c"). \
548         replace(u"2t2c", u"2c").\
549         replace(u"4t2c", u"2c"). \
550         replace(u"4t4c", u"4c").\
551         replace(u"8t4c", u"4c")
552
553
554 def _tpc_insert_data(target, src, include_tests):
555     """Insert src data to the target structure.
556
557     :param target: Target structure where the data is placed.
558     :param src: Source data to be placed into the target structure.
559     :param include_tests: Which results will be included (MRR, NDR, PDR).
560     :type target: list
561     :type src: dict
562     :type include_tests: str
563     """
564     try:
565         if include_tests == u"MRR":
566             target[u"mean"] = src[u"result"][u"receive-rate"]
567             target[u"stdev"] = src[u"result"][u"receive-stdev"]
568         elif include_tests == u"PDR":
569             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
570         elif include_tests == u"NDR":
571             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
572         elif u"latency" in include_tests:
573             keys = include_tests.split(u"-")
574             if len(keys) == 4:
575                 lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
576                 target[u"data"].append(
577                     float(u"nan") if lat == -1 else lat * 1e6
578                 )
579     except (KeyError, TypeError):
580         pass
581
582
583 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
584                              footnote=u"", sort_data=True, title=u"",
585                              generate_rst=True):
586     """Generate html table from input data with simple sorting possibility.
587
588     :param header: Table header.
589     :param data: Input data to be included in the table. It is a list of lists.
590         Inner lists are rows in the table. All inner lists must be of the same
591         length. The length of these lists must be the same as the length of the
592         header.
593     :param out_file_name: The name (relative or full path) where the
594         generated html table is written.
595     :param legend: The legend to display below the table.
596     :param footnote: The footnote to display below the table (and legend).
597     :param sort_data: If True the data sorting is enabled.
598     :param title: The table (and file) title.
599     :param generate_rst: If True, wrapping rst file is generated.
600     :type header: list
601     :type data: list of lists
602     :type out_file_name: str
603     :type legend: str
604     :type footnote: str
605     :type sort_data: bool
606     :type title: str
607     :type generate_rst: bool
608     """
609
610     try:
611         idx = header.index(u"Test Case")
612     except ValueError:
613         idx = 0
614     params = {
615         u"align-hdr": (
616             [u"left", u"right"],
617             [u"left", u"left", u"right"],
618             [u"left", u"left", u"left", u"right"]
619         ),
620         u"align-itm": (
621             [u"left", u"right"],
622             [u"left", u"left", u"right"],
623             [u"left", u"left", u"left", u"right"]
624         ),
625         u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
626     }
627
628     df_data = pd.DataFrame(data, columns=header)
629
630     if sort_data:
631         df_sorted = [df_data.sort_values(
632             by=[key, header[idx]], ascending=[True, True]
633             if key != header[idx] else [False, True]) for key in header]
634         df_sorted_rev = [df_data.sort_values(
635             by=[key, header[idx]], ascending=[False, True]
636             if key != header[idx] else [True, True]) for key in header]
637         df_sorted.extend(df_sorted_rev)
638     else:
639         df_sorted = df_data
640
641     fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
642                    for idx in range(len(df_data))]]
643     table_header = dict(
644         values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
645         fill_color=u"#7eade7",
646         align=params[u"align-hdr"][idx],
647         font=dict(
648             family=u"Courier New",
649             size=12
650         )
651     )
652
653     fig = go.Figure()
654
655     if sort_data:
656         for table in df_sorted:
657             columns = [table.get(col) for col in header]
658             fig.add_trace(
659                 go.Table(
660                     columnwidth=params[u"width"][idx],
661                     header=table_header,
662                     cells=dict(
663                         values=columns,
664                         fill_color=fill_color,
665                         align=params[u"align-itm"][idx],
666                         font=dict(
667                             family=u"Courier New",
668                             size=12
669                         )
670                     )
671                 )
672             )
673
674         buttons = list()
675         menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
676         menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
677         for idx, hdr in enumerate(menu_items):
678             visible = [False, ] * len(menu_items)
679             visible[idx] = True
680             buttons.append(
681                 dict(
682                     label=hdr.replace(u" [Mpps]", u""),
683                     method=u"update",
684                     args=[{u"visible": visible}],
685                 )
686             )
687
688         fig.update_layout(
689             updatemenus=[
690                 go.layout.Updatemenu(
691                     type=u"dropdown",
692                     direction=u"down",
693                     x=0.0,
694                     xanchor=u"left",
695                     y=1.002,
696                     yanchor=u"bottom",
697                     active=len(menu_items) - 1,
698                     buttons=list(buttons)
699                 )
700             ],
701         )
702     else:
703         fig.add_trace(
704             go.Table(
705                 columnwidth=params[u"width"][idx],
706                 header=table_header,
707                 cells=dict(
708                     values=[df_sorted.get(col) for col in header],
709                     fill_color=fill_color,
710                     align=params[u"align-itm"][idx],
711                     font=dict(
712                         family=u"Courier New",
713                         size=12
714                     )
715                 )
716             )
717         )
718
719     ploff.plot(
720         fig,
721         show_link=False,
722         auto_open=False,
723         filename=f"{out_file_name}_in.html"
724     )
725
726     if not generate_rst:
727         return
728
729     file_name = out_file_name.split(u"/")[-1]
730     if u"vpp" in out_file_name:
731         path = u"_tmp/src/vpp_performance_tests/comparisons/"
732     else:
733         path = u"_tmp/src/dpdk_performance_tests/comparisons/"
734     logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
735     with open(f"{path}{file_name}.rst", u"wt") as rst_file:
736         rst_file.write(
737             u"\n"
738             u".. |br| raw:: html\n\n    <br />\n\n\n"
739             u".. |prein| raw:: html\n\n    <pre>\n\n\n"
740             u".. |preout| raw:: html\n\n    </pre>\n\n"
741         )
742         if title:
743             rst_file.write(f"{title}\n")
744             rst_file.write(f"{u'`' * len(title)}\n\n")
745         rst_file.write(
746             u".. raw:: html\n\n"
747             f'    <iframe frameborder="0" scrolling="no" '
748             f'width="1600" height="1200" '
749             f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
750             f'</iframe>\n\n'
751         )
752
753         if legend:
754             try:
755                 itm_lst = legend[1:-2].split(u"\n")
756                 rst_file.write(
757                     f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
758                 )
759             except IndexError as err:
760                 logging.error(f"Legend cannot be written to html file\n{err}")
761         if footnote:
762             try:
763                 itm_lst = footnote[1:].split(u"\n")
764                 rst_file.write(
765                     f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
766                 )
767             except IndexError as err:
768                 logging.error(f"Footnote cannot be written to html file\n{err}")
769
770
771 def table_soak_vs_ndr(table, input_data):
772     """Generate the table(s) with algorithm: table_soak_vs_ndr
773     specified in the specification file.
774
775     :param table: Table to generate.
776     :param input_data: Data to process.
777     :type table: pandas.Series
778     :type input_data: InputData
779     """
780
781     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
782
783     # Transform the data
784     logging.info(
785         f"    Creating the data set for the {table.get(u'type', u'')} "
786         f"{table.get(u'title', u'')}."
787     )
788     data = input_data.filter_data(table, continue_on_error=True)
789
790     # Prepare the header of the table
791     try:
792         header = [
793             u"Test Case",
794             f"Avg({table[u'reference'][u'title']})",
795             f"Stdev({table[u'reference'][u'title']})",
796             f"Avg({table[u'compare'][u'title']})",
797             f"Stdev{table[u'compare'][u'title']})",
798             u"Diff",
799             u"Stdev(Diff)"
800         ]
801         header_str = u";".join(header) + u"\n"
802         legend = (
803             u"\nLegend:\n"
804             f"Avg({table[u'reference'][u'title']}): "
805             f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
806             f"from a series of runs of the listed tests.\n"
807             f"Stdev({table[u'reference'][u'title']}): "
808             f"Standard deviation value of {table[u'reference'][u'title']} "
809             f"[Mpps] computed from a series of runs of the listed tests.\n"
810             f"Avg({table[u'compare'][u'title']}): "
811             f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
812             f"a series of runs of the listed tests.\n"
813             f"Stdev({table[u'compare'][u'title']}): "
814             f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
815             f"computed from a series of runs of the listed tests.\n"
816             f"Diff({table[u'reference'][u'title']},"
817             f"{table[u'compare'][u'title']}): "
818             f"Percentage change calculated for mean values.\n"
819             u"Stdev(Diff): "
820             u"Standard deviation of percentage change calculated for mean "
821             u"values."
822         )
823     except (AttributeError, KeyError) as err:
824         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
825         return
826
827     # Create a list of available SOAK test results:
828     tbl_dict = dict()
829     for job, builds in table[u"compare"][u"data"].items():
830         for build in builds:
831             for tst_name, tst_data in data[job][str(build)].items():
832                 if tst_data[u"type"] == u"SOAK":
833                     tst_name_mod = tst_name.replace(u"-soak", u"")
834                     if tbl_dict.get(tst_name_mod, None) is None:
835                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
836                         nic = groups.group(0) if groups else u""
837                         name = (
838                             f"{nic}-"
839                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
840                         )
841                         tbl_dict[tst_name_mod] = {
842                             u"name": name,
843                             u"ref-data": list(),
844                             u"cmp-data": list()
845                         }
846                     try:
847                         tbl_dict[tst_name_mod][u"cmp-data"].append(
848                             tst_data[u"throughput"][u"LOWER"])
849                     except (KeyError, TypeError):
850                         pass
851     tests_lst = tbl_dict.keys()
852
853     # Add corresponding NDR test results:
854     for job, builds in table[u"reference"][u"data"].items():
855         for build in builds:
856             for tst_name, tst_data in data[job][str(build)].items():
857                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
858                     replace(u"-mrr", u"")
859                 if tst_name_mod not in tests_lst:
860                     continue
861                 try:
862                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
863                         continue
864                     if table[u"include-tests"] == u"MRR":
865                         result = (tst_data[u"result"][u"receive-rate"],
866                                   tst_data[u"result"][u"receive-stdev"])
867                     elif table[u"include-tests"] == u"PDR":
868                         result = \
869                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
870                     elif table[u"include-tests"] == u"NDR":
871                         result = \
872                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
873                     else:
874                         result = None
875                     if result is not None:
876                         tbl_dict[tst_name_mod][u"ref-data"].append(
877                             result)
878                 except (KeyError, TypeError):
879                     continue
880
881     tbl_lst = list()
882     for tst_name in tbl_dict:
883         item = [tbl_dict[tst_name][u"name"], ]
884         data_r = tbl_dict[tst_name][u"ref-data"]
885         if data_r:
886             if table[u"include-tests"] == u"MRR":
887                 data_r_mean = data_r[0][0]
888                 data_r_stdev = data_r[0][1]
889             else:
890                 data_r_mean = mean(data_r)
891                 data_r_stdev = stdev(data_r)
892             item.append(round(data_r_mean / 1e6, 1))
893             item.append(round(data_r_stdev / 1e6, 1))
894         else:
895             data_r_mean = None
896             data_r_stdev = None
897             item.extend([None, None])
898         data_c = tbl_dict[tst_name][u"cmp-data"]
899         if data_c:
900             if table[u"include-tests"] == u"MRR":
901                 data_c_mean = data_c[0][0]
902                 data_c_stdev = data_c[0][1]
903             else:
904                 data_c_mean = mean(data_c)
905                 data_c_stdev = stdev(data_c)
906             item.append(round(data_c_mean / 1e6, 1))
907             item.append(round(data_c_stdev / 1e6, 1))
908         else:
909             data_c_mean = None
910             data_c_stdev = None
911             item.extend([None, None])
912         if data_r_mean is not None and data_c_mean is not None:
913             delta, d_stdev = relative_change_stdev(
914                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
915             try:
916                 item.append(round(delta))
917             except ValueError:
918                 item.append(delta)
919             try:
920                 item.append(round(d_stdev))
921             except ValueError:
922                 item.append(d_stdev)
923             tbl_lst.append(item)
924
925     # Sort the table according to the relative change
926     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
927
928     # Generate csv tables:
929     csv_file_name = f"{table[u'output-file']}.csv"
930     with open(csv_file_name, u"wt") as file_handler:
931         file_handler.write(header_str)
932         for test in tbl_lst:
933             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
934
935     convert_csv_to_pretty_txt(
936         csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
937     )
938     with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
939         file_handler.write(legend)
940
941     # Generate html table:
942     _tpc_generate_html_table(
943         header,
944         tbl_lst,
945         table[u'output-file'],
946         legend=legend,
947         title=table.get(u"title", u"")
948     )
949
950
951 def table_perf_trending_dash(table, input_data):
952     """Generate the table(s) with algorithm:
953     table_perf_trending_dash
954     specified in the specification file.
955
956     :param table: Table to generate.
957     :param input_data: Data to process.
958     :type table: pandas.Series
959     :type input_data: InputData
960     """
961
962     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
963
964     # Transform the data
965     logging.info(
966         f"    Creating the data set for the {table.get(u'type', u'')} "
967         f"{table.get(u'title', u'')}."
968     )
969     data = input_data.filter_data(table, continue_on_error=True)
970
971     # Prepare the header of the tables
972     header = [
973         u"Test Case",
974         u"Trend [Mpps]",
975         u"Short-Term Change [%]",
976         u"Long-Term Change [%]",
977         u"Regressions [#]",
978         u"Progressions [#]"
979     ]
980     header_str = u",".join(header) + u"\n"
981
982     incl_tests = table.get(u"include-tests", u"MRR")
983
984     # Prepare data to the table:
985     tbl_dict = dict()
986     for job, builds in table[u"data"].items():
987         for build in builds:
988             for tst_name, tst_data in data[job][str(build)].items():
989                 if tst_name.lower() in table.get(u"ignore-list", list()):
990                     continue
991                 if tbl_dict.get(tst_name, None) is None:
992                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
993                     if not groups:
994                         continue
995                     nic = groups.group(0)
996                     tbl_dict[tst_name] = {
997                         u"name": f"{nic}-{tst_data[u'name']}",
998                         u"data": OrderedDict()
999                     }
1000                 try:
1001                     if incl_tests == u"MRR":
1002                         tbl_dict[tst_name][u"data"][str(build)] = \
1003                             tst_data[u"result"][u"receive-rate"]
1004                     elif incl_tests == u"NDR":
1005                         tbl_dict[tst_name][u"data"][str(build)] = \
1006                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
1007                     elif incl_tests == u"PDR":
1008                         tbl_dict[tst_name][u"data"][str(build)] = \
1009                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
1010                 except (TypeError, KeyError):
1011                     pass  # No data in output.xml for this test
1012
1013     tbl_lst = list()
1014     for tst_name in tbl_dict:
1015         data_t = tbl_dict[tst_name][u"data"]
1016         if len(data_t) < 2:
1017             continue
1018
1019         try:
1020             classification_lst, avgs, _ = classify_anomalies(data_t)
1021         except ValueError as err:
1022             logging.info(f"{err} Skipping")
1023             return
1024
1025         win_size = min(len(data_t), table[u"window"])
1026         long_win_size = min(len(data_t), table[u"long-trend-window"])
1027
1028         try:
1029             max_long_avg = max(
1030                 [x for x in avgs[-long_win_size:-win_size]
1031                  if not isnan(x)])
1032         except ValueError:
1033             max_long_avg = nan
1034         last_avg = avgs[-1]
1035         avg_week_ago = avgs[max(-win_size, -len(avgs))]
1036
1037         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1038             rel_change_last = nan
1039         else:
1040             rel_change_last = round(
1041                 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
1042
1043         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1044             rel_change_long = nan
1045         else:
1046             rel_change_long = round(
1047                 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
1048
1049         if classification_lst:
1050             if isnan(rel_change_last) and isnan(rel_change_long):
1051                 continue
1052             if isnan(last_avg) or isnan(rel_change_last) or \
1053                     isnan(rel_change_long):
1054                 continue
1055             tbl_lst.append(
1056                 [tbl_dict[tst_name][u"name"],
1057                  round(last_avg / 1e6, 2),
1058                  rel_change_last,
1059                  rel_change_long,
1060                  classification_lst[-win_size+1:].count(u"regression"),
1061                  classification_lst[-win_size+1:].count(u"progression")])
1062
1063     tbl_lst.sort(key=lambda rel: rel[0])
1064     tbl_lst.sort(key=lambda rel: rel[3])
1065     tbl_lst.sort(key=lambda rel: rel[2])
1066
1067     tbl_sorted = list()
1068     for nrr in range(table[u"window"], -1, -1):
1069         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1070         for nrp in range(table[u"window"], -1, -1):
1071             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1072             tbl_sorted.extend(tbl_out)
1073
1074     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1075
1076     logging.info(f"    Writing file: {file_name}")
1077     with open(file_name, u"wt") as file_handler:
1078         file_handler.write(header_str)
1079         for test in tbl_sorted:
1080             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1081
1082     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1083     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1084
1085
1086 def _generate_url(testbed, test_name):
1087     """Generate URL to a trending plot from the name of the test case.
1088
1089     :param testbed: The testbed used for testing.
1090     :param test_name: The name of the test case.
1091     :type testbed: str
1092     :type test_name: str
1093     :returns: The URL to the plot with the trending data for the given test
1094         case.
1095     :rtype str
1096     """
1097
1098     if u"x520" in test_name:
1099         nic = u"x520"
1100     elif u"x710" in test_name:
1101         nic = u"x710"
1102     elif u"xl710" in test_name:
1103         nic = u"xl710"
1104     elif u"xxv710" in test_name:
1105         nic = u"xxv710"
1106     elif u"vic1227" in test_name:
1107         nic = u"vic1227"
1108     elif u"vic1385" in test_name:
1109         nic = u"vic1385"
1110     elif u"x553" in test_name:
1111         nic = u"x553"
1112     elif u"cx556" in test_name or u"cx556a" in test_name:
1113         nic = u"cx556a"
1114     elif u"ena" in test_name:
1115         nic = u"nitro50g"
1116     else:
1117         nic = u""
1118
1119     if u"64b" in test_name:
1120         frame_size = u"64b"
1121     elif u"78b" in test_name:
1122         frame_size = u"78b"
1123     elif u"imix" in test_name:
1124         frame_size = u"imix"
1125     elif u"9000b" in test_name:
1126         frame_size = u"9000b"
1127     elif u"1518b" in test_name:
1128         frame_size = u"1518b"
1129     elif u"114b" in test_name:
1130         frame_size = u"114b"
1131     else:
1132         frame_size = u""
1133
1134     if u"1t1c" in test_name or \
1135         (u"-1c-" in test_name and
1136          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1137         cores = u"1t1c"
1138     elif u"2t2c" in test_name or \
1139          (u"-2c-" in test_name and
1140           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1141         cores = u"2t2c"
1142     elif u"4t4c" in test_name or \
1143          (u"-4c-" in test_name and
1144           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1145         cores = u"4t4c"
1146     elif u"2t1c" in test_name or \
1147          (u"-1c-" in test_name and
1148           testbed in
1149           (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2", u"2n-aws", u"3n-aws")):
1150         cores = u"2t1c"
1151     elif u"4t2c" in test_name or \
1152          (u"-2c-" in test_name and
1153           testbed in
1154           (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2", u"2n-aws", u"3n-aws")):
1155         cores = u"4t2c"
1156     elif u"8t4c" in test_name or \
1157          (u"-4c-" in test_name and
1158           testbed in
1159           (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2", u"2n-aws", u"3n-aws")):
1160         cores = u"8t4c"
1161     else:
1162         cores = u""
1163
1164     if u"testpmd" in test_name:
1165         driver = u"testpmd"
1166     elif u"l3fwd" in test_name:
1167         driver = u"l3fwd"
1168     elif u"avf" in test_name:
1169         driver = u"avf"
1170     elif u"rdma" in test_name:
1171         driver = u"rdma"
1172     elif u"dnv" in testbed or u"tsh" in testbed:
1173         driver = u"ixgbe"
1174     elif u"ena" in test_name:
1175         driver = u"ena"
1176     else:
1177         driver = u"dpdk"
1178
1179     if u"macip-iacl1s" in test_name:
1180         bsf = u"features-macip-iacl1"
1181     elif u"macip-iacl10s" in test_name:
1182         bsf = u"features-macip-iacl10"
1183     elif u"macip-iacl50s" in test_name:
1184         bsf = u"features-macip-iacl50"
1185     elif u"iacl1s" in test_name:
1186         bsf = u"features-iacl1"
1187     elif u"iacl10s" in test_name:
1188         bsf = u"features-iacl10"
1189     elif u"iacl50s" in test_name:
1190         bsf = u"features-iacl50"
1191     elif u"oacl1s" in test_name:
1192         bsf = u"features-oacl1"
1193     elif u"oacl10s" in test_name:
1194         bsf = u"features-oacl10"
1195     elif u"oacl50s" in test_name:
1196         bsf = u"features-oacl50"
1197     elif u"nat44det" in test_name:
1198         bsf = u"nat44det-bidir"
1199     elif u"nat44ed" in test_name and u"udir" in test_name:
1200         bsf = u"nat44ed-udir"
1201     elif u"-cps" in test_name and u"ethip4udp" in test_name:
1202         bsf = u"udp-cps"
1203     elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1204         bsf = u"tcp-cps"
1205     elif u"-pps" in test_name and u"ethip4udp" in test_name:
1206         bsf = u"udp-pps"
1207     elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1208         bsf = u"tcp-pps"
1209     elif u"-tput" in test_name and u"ethip4udp" in test_name:
1210         bsf = u"udp-tput"
1211     elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1212         bsf = u"tcp-tput"
1213     elif u"udpsrcscale" in test_name:
1214         bsf = u"features-udp"
1215     elif u"iacl" in test_name:
1216         bsf = u"features"
1217     elif u"policer" in test_name:
1218         bsf = u"features"
1219     elif u"adl" in test_name:
1220         bsf = u"features"
1221     elif u"cop" in test_name:
1222         bsf = u"features"
1223     elif u"nat" in test_name:
1224         bsf = u"features"
1225     elif u"macip" in test_name:
1226         bsf = u"features"
1227     elif u"scale" in test_name:
1228         bsf = u"scale"
1229     elif u"base" in test_name:
1230         bsf = u"base"
1231     else:
1232         bsf = u"base"
1233
1234     if u"114b" in test_name and u"vhost" in test_name:
1235         domain = u"vts"
1236     elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1237         domain = u"nat44"
1238         if u"nat44det" in test_name:
1239             domain += u"-det-bidir"
1240         else:
1241             domain += u"-ed"
1242         if u"udir" in test_name:
1243             domain += u"-unidir"
1244         elif u"-ethip4udp-" in test_name:
1245             domain += u"-udp"
1246         elif u"-ethip4tcp-" in test_name:
1247             domain += u"-tcp"
1248         if u"-cps" in test_name:
1249             domain += u"-cps"
1250         elif u"-pps" in test_name:
1251             domain += u"-pps"
1252         elif u"-tput" in test_name:
1253             domain += u"-tput"
1254     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1255         domain = u"dpdk"
1256     elif u"memif" in test_name:
1257         domain = u"container_memif"
1258     elif u"srv6" in test_name:
1259         domain = u"srv6"
1260     elif u"vhost" in test_name:
1261         domain = u"vhost"
1262         if u"vppl2xc" in test_name:
1263             driver += u"-vpp"
1264         else:
1265             driver += u"-testpmd"
1266         if u"lbvpplacp" in test_name:
1267             bsf += u"-link-bonding"
1268     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1269         domain = u"nf_service_density_vnfc"
1270     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1271         domain = u"nf_service_density_cnfc"
1272     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1273         domain = u"nf_service_density_cnfp"
1274     elif u"ipsec" in test_name:
1275         domain = u"ipsec"
1276         if u"sw" in test_name:
1277             bsf += u"-sw"
1278         elif u"hw" in test_name:
1279             bsf += u"-hw"
1280     elif u"ethip4vxlan" in test_name:
1281         domain = u"ip4_tunnels"
1282     elif u"ethip4udpgeneve" in test_name:
1283         domain = u"ip4_tunnels"
1284     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1285         domain = u"ip4"
1286     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1287         domain = u"ip6"
1288     elif u"l2xcbase" in test_name or \
1289             u"l2xcscale" in test_name or \
1290             u"l2bdbasemaclrn" in test_name or \
1291             u"l2bdscale" in test_name or \
1292             u"l2patch" in test_name:
1293         domain = u"l2"
1294     else:
1295         domain = u""
1296
1297     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1298     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1299
1300     return file_name + anchor_name
1301
1302
1303 def table_perf_trending_dash_html(table, input_data):
1304     """Generate the table(s) with algorithm:
1305     table_perf_trending_dash_html specified in the specification
1306     file.
1307
1308     :param table: Table to generate.
1309     :param input_data: Data to process.
1310     :type table: dict
1311     :type input_data: InputData
1312     """
1313
1314     _ = input_data
1315
1316     if not table.get(u"testbed", None):
1317         logging.error(
1318             f"The testbed is not defined for the table "
1319             f"{table.get(u'title', u'')}. Skipping."
1320         )
1321         return
1322
1323     test_type = table.get(u"test-type", u"MRR")
1324     if test_type not in (u"MRR", u"NDR", u"PDR"):
1325         logging.error(
1326             f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
1327             f"Skipping."
1328         )
1329         return
1330
1331     if test_type in (u"NDR", u"PDR"):
1332         lnk_dir = u"../ndrpdr_trending/"
1333         lnk_sufix = f"-{test_type.lower()}"
1334     else:
1335         lnk_dir = u"../trending/"
1336         lnk_sufix = u""
1337
1338     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1339
1340     try:
1341         with open(table[u"input-file"], u'rt') as csv_file:
1342             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1343     except FileNotFoundError as err:
1344         logging.warning(f"{err}")
1345         return
1346     except KeyError:
1347         logging.warning(u"The input file is not defined.")
1348         return
1349     except csv.Error as err:
1350         logging.warning(
1351             f"Not possible to process the file {table[u'input-file']}.\n"
1352             f"{repr(err)}"
1353         )
1354         return
1355
1356     # Table:
1357     dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1358
1359     # Table header:
1360     trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1361     for idx, item in enumerate(csv_lst[0]):
1362         alignment = u"left" if idx == 0 else u"center"
1363         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
1364         thead.text = item
1365
1366     # Rows:
1367     colors = {
1368         u"regression": (
1369             u"#ffcccc",
1370             u"#ff9999"
1371         ),
1372         u"progression": (
1373             u"#c6ecc6",
1374             u"#9fdf9f"
1375         ),
1376         u"normal": (
1377             u"#e9f1fb",
1378             u"#d4e4f7"
1379         )
1380     }
1381     for r_idx, row in enumerate(csv_lst[1:]):
1382         if int(row[4]):
1383             color = u"regression"
1384         elif int(row[5]):
1385             color = u"progression"
1386         else:
1387             color = u"normal"
1388         trow = ET.SubElement(
1389             dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1390         )
1391
1392         # Columns:
1393         for c_idx, item in enumerate(row):
1394             tdata = ET.SubElement(
1395                 trow,
1396                 u"td",
1397                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
1398             )
1399             # Name:
1400             if c_idx == 0 and table.get(u"add-links", True):
1401                 ref = ET.SubElement(
1402                     tdata,
1403                     u"a",
1404                     attrib=dict(
1405                         href=f"{lnk_dir}"
1406                         f"{_generate_url(table.get(u'testbed', ''), item)}"
1407                         f"{lnk_sufix}"
1408                     )
1409                 )
1410                 ref.text = item
1411             else:
1412                 tdata.text = item
1413     try:
1414         with open(table[u"output-file"], u'w') as html_file:
1415             logging.info(f"    Writing file: {table[u'output-file']}")
1416             html_file.write(u".. raw:: html\n\n\t")
1417             html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1418             html_file.write(u"\n\t<p><br><br></p>\n")
1419     except KeyError:
1420         logging.warning(u"The output file is not defined.")
1421         return
1422
1423
1424 def table_last_failed_tests(table, input_data):
1425     """Generate the table(s) with algorithm: table_last_failed_tests
1426     specified in the specification file.
1427
1428     :param table: Table to generate.
1429     :param input_data: Data to process.
1430     :type table: pandas.Series
1431     :type input_data: InputData
1432     """
1433
1434     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1435
1436     # Transform the data
1437     logging.info(
1438         f"    Creating the data set for the {table.get(u'type', u'')} "
1439         f"{table.get(u'title', u'')}."
1440     )
1441
1442     data = input_data.filter_data(table, continue_on_error=True)
1443
1444     if data is None or data.empty:
1445         logging.warning(
1446             f"    No data for the {table.get(u'type', u'')} "
1447             f"{table.get(u'title', u'')}."
1448         )
1449         return
1450
1451     tbl_list = list()
1452     for job, builds in table[u"data"].items():
1453         for build in builds:
1454             build = str(build)
1455             try:
1456                 version = input_data.metadata(job, build).get(u"version", u"")
1457                 duration = \
1458                     input_data.metadata(job, build).get(u"elapsedtime", u"")
1459             except KeyError:
1460                 logging.error(f"Data for {job}: {build} is not present.")
1461                 return
1462             tbl_list.append(build)
1463             tbl_list.append(version)
1464             failed_tests = list()
1465             passed = 0
1466             failed = 0
1467             for tst_data in data[job][build].values:
1468                 if tst_data[u"status"] != u"FAIL":
1469                     passed += 1
1470                     continue
1471                 failed += 1
1472                 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1473                 if not groups:
1474                     continue
1475                 nic = groups.group(0)
1476                 msg = tst_data[u'msg'].replace(u"\n", u"")
1477                 msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
1478                              'xxx.xxx.xxx.xxx', msg)
1479                 msg = msg.split(u'Also teardown failed')[0]
1480                 failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
1481             tbl_list.append(passed)
1482             tbl_list.append(failed)
1483             tbl_list.append(duration)
1484             tbl_list.extend(failed_tests)
1485
1486     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1487     logging.info(f"    Writing file: {file_name}")
1488     with open(file_name, u"wt") as file_handler:
1489         for test in tbl_list:
1490             file_handler.write(f"{test}\n")
1491
1492
1493 def table_failed_tests(table, input_data):
1494     """Generate the table(s) with algorithm: table_failed_tests
1495     specified in the specification file.
1496
1497     :param table: Table to generate.
1498     :param input_data: Data to process.
1499     :type table: pandas.Series
1500     :type input_data: InputData
1501     """
1502
1503     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1504
1505     # Transform the data
1506     logging.info(
1507         f"    Creating the data set for the {table.get(u'type', u'')} "
1508         f"{table.get(u'title', u'')}."
1509     )
1510     data = input_data.filter_data(table, continue_on_error=True)
1511
1512     test_type = u"MRR"
1513     if u"NDRPDR" in table.get(u"filter", list()):
1514         test_type = u"NDRPDR"
1515
1516     # Prepare the header of the tables
1517     header = [
1518         u"Test Case",
1519         u"Failures [#]",
1520         u"Last Failure [Time]",
1521         u"Last Failure [VPP-Build-Id]",
1522         u"Last Failure [CSIT-Job-Build-Id]"
1523     ]
1524
1525     # Generate the data for the table according to the model in the table
1526     # specification
1527
1528     now = dt.utcnow()
1529     timeperiod = timedelta(int(table.get(u"window", 7)))
1530
1531     tbl_dict = dict()
1532     for job, builds in table[u"data"].items():
1533         for build in builds:
1534             build = str(build)
1535             for tst_name, tst_data in data[job][build].items():
1536                 if tst_name.lower() in table.get(u"ignore-list", list()):
1537                     continue
1538                 if tbl_dict.get(tst_name, None) is None:
1539                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1540                     if not groups:
1541                         continue
1542                     nic = groups.group(0)
1543                     tbl_dict[tst_name] = {
1544                         u"name": f"{nic}-{tst_data[u'name']}",
1545                         u"data": OrderedDict()
1546                     }
1547                 try:
1548                     generated = input_data.metadata(job, build).\
1549                         get(u"generated", u"")
1550                     if not generated:
1551                         continue
1552                     then = dt.strptime(generated, u"%Y%m%d %H:%M")
1553                     if (now - then) <= timeperiod:
1554                         tbl_dict[tst_name][u"data"][build] = (
1555                             tst_data[u"status"],
1556                             generated,
1557                             input_data.metadata(job, build).get(u"version",
1558                                                                 u""),
1559                             build
1560                         )
1561                 except (TypeError, KeyError) as err:
1562                     logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
1563
1564     max_fails = 0
1565     tbl_lst = list()
1566     for tst_data in tbl_dict.values():
1567         fails_nr = 0
1568         fails_last_date = u""
1569         fails_last_vpp = u""
1570         fails_last_csit = u""
1571         for val in tst_data[u"data"].values():
1572             if val[0] == u"FAIL":
1573                 fails_nr += 1
1574                 fails_last_date = val[1]
1575                 fails_last_vpp = val[2]
1576                 fails_last_csit = val[3]
1577         if fails_nr:
1578             max_fails = fails_nr if fails_nr > max_fails else max_fails
1579             tbl_lst.append([
1580                 tst_data[u"name"],
1581                 fails_nr,
1582                 fails_last_date,
1583                 fails_last_vpp,
1584                 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1585                 f"-build-{fails_last_csit}"
1586             ])
1587
1588     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1589     tbl_sorted = list()
1590     for nrf in range(max_fails, -1, -1):
1591         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1592         tbl_sorted.extend(tbl_fails)
1593
1594     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1595     logging.info(f"    Writing file: {file_name}")
1596     with open(file_name, u"wt") as file_handler:
1597         file_handler.write(u",".join(header) + u"\n")
1598         for test in tbl_sorted:
1599             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1600
1601     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1602     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1603
1604
1605 def table_failed_tests_html(table, input_data):
1606     """Generate the table(s) with algorithm: table_failed_tests_html
1607     specified in the specification file.
1608
1609     :param table: Table to generate.
1610     :param input_data: Data to process.
1611     :type table: pandas.Series
1612     :type input_data: InputData
1613     """
1614
1615     _ = input_data
1616
1617     if not table.get(u"testbed", None):
1618         logging.error(
1619             f"The testbed is not defined for the table "
1620             f"{table.get(u'title', u'')}. Skipping."
1621         )
1622         return
1623
1624     test_type = table.get(u"test-type", u"MRR")
1625     if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1626         logging.error(
1627             f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
1628             f"Skipping."
1629         )
1630         return
1631
1632     if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1633         lnk_dir = u"../ndrpdr_trending/"
1634         lnk_sufix = u"-pdr"
1635     else:
1636         lnk_dir = u"../trending/"
1637         lnk_sufix = u""
1638
1639     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1640
1641     try:
1642         with open(table[u"input-file"], u'rt') as csv_file:
1643             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1644     except KeyError:
1645         logging.warning(u"The input file is not defined.")
1646         return
1647     except csv.Error as err:
1648         logging.warning(
1649             f"Not possible to process the file {table[u'input-file']}.\n"
1650             f"{repr(err)}"
1651         )
1652         return
1653
1654     # Table:
1655     failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1656
1657     # Table header:
1658     trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1659     for idx, item in enumerate(csv_lst[0]):
1660         alignment = u"left" if idx == 0 else u"center"
1661         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
1662         thead.text = item
1663
1664     # Rows:
1665     colors = (u"#e9f1fb", u"#d4e4f7")
1666     for r_idx, row in enumerate(csv_lst[1:]):
1667         background = colors[r_idx % 2]
1668         trow = ET.SubElement(
1669             failed_tests, u"tr", attrib=dict(bgcolor=background)
1670         )
1671
1672         # Columns:
1673         for c_idx, item in enumerate(row):
1674             tdata = ET.SubElement(
1675                 trow,
1676                 u"td",
1677                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
1678             )
1679             # Name:
1680             if c_idx == 0 and table.get(u"add-links", True):
1681                 ref = ET.SubElement(
1682                     tdata,
1683                     u"a",
1684                     attrib=dict(
1685                         href=f"{lnk_dir}"
1686                         f"{_generate_url(table.get(u'testbed', ''), item)}"
1687                         f"{lnk_sufix}"
1688                     )
1689                 )
1690                 ref.text = item
1691             else:
1692                 tdata.text = item
1693     try:
1694         with open(table[u"output-file"], u'w') as html_file:
1695             logging.info(f"    Writing file: {table[u'output-file']}")
1696             html_file.write(u".. raw:: html\n\n\t")
1697             html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1698             html_file.write(u"\n\t<p><br><br></p>\n")
1699     except KeyError:
1700         logging.warning(u"The output file is not defined.")
1701         return
1702
1703
1704 def table_comparison(table, input_data):
1705     """Generate the table(s) with algorithm: table_comparison
1706     specified in the specification file.
1707
1708     :param table: Table to generate.
1709     :param input_data: Data to process.
1710     :type table: pandas.Series
1711     :type input_data: InputData
1712     """
1713     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1714
1715     # Transform the data
1716     logging.info(
1717         f"    Creating the data set for the {table.get(u'type', u'')} "
1718         f"{table.get(u'title', u'')}."
1719     )
1720
1721     columns = table.get(u"columns", None)
1722     if not columns:
1723         logging.error(
1724             f"No columns specified for {table.get(u'title', u'')}. Skipping."
1725         )
1726         return
1727
1728     cols = list()
1729     for idx, col in enumerate(columns):
1730         if col.get(u"data-set", None) is None:
1731             logging.warning(f"No data for column {col.get(u'title', u'')}")
1732             continue
1733         tag = col.get(u"tag", None)
1734         data = input_data.filter_data(
1735             table,
1736             params=[
1737                 u"throughput",
1738                 u"result",
1739                 u"latency",
1740                 u"name",
1741                 u"parent",
1742                 u"tags"
1743             ],
1744             data=col[u"data-set"],
1745             continue_on_error=True
1746         )
1747         col_data = {
1748             u"title": col.get(u"title", f"Column{idx}"),
1749             u"data": dict()
1750         }
1751         for builds in data.values:
1752             for build in builds:
1753                 for tst_name, tst_data in build.items():
1754                     if tag and tag not in tst_data[u"tags"]:
1755                         continue
1756                     tst_name_mod = \
1757                         _tpc_modify_test_name(tst_name, ignore_nic=True).\
1758                         replace(u"2n1l-", u"")
1759                     if col_data[u"data"].get(tst_name_mod, None) is None:
1760                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
1761                         if u"across testbeds" in table[u"title"].lower() or \
1762                                 u"across topologies" in table[u"title"].lower():
1763                             name = _tpc_modify_displayed_test_name(name)
1764                         col_data[u"data"][tst_name_mod] = {
1765                             u"name": name,
1766                             u"replace": True,
1767                             u"data": list(),
1768                             u"mean": None,
1769                             u"stdev": None
1770                         }
1771                     _tpc_insert_data(
1772                         target=col_data[u"data"][tst_name_mod],
1773                         src=tst_data,
1774                         include_tests=table[u"include-tests"]
1775                     )
1776
1777         replacement = col.get(u"data-replacement", None)
1778         if replacement:
1779             rpl_data = input_data.filter_data(
1780                 table,
1781                 params=[
1782                     u"throughput",
1783                     u"result",
1784                     u"latency",
1785                     u"name",
1786                     u"parent",
1787                     u"tags"
1788                 ],
1789                 data=replacement,
1790                 continue_on_error=True
1791             )
1792             for builds in rpl_data.values:
1793                 for build in builds:
1794                     for tst_name, tst_data in build.items():
1795                         if tag and tag not in tst_data[u"tags"]:
1796                             continue
1797                         tst_name_mod = \
1798                             _tpc_modify_test_name(tst_name, ignore_nic=True).\
1799                             replace(u"2n1l-", u"")
1800                         if col_data[u"data"].get(tst_name_mod, None) is None:
1801                             name = tst_data[u'name'].rsplit(u'-', 1)[0]
1802                             if u"across testbeds" in table[u"title"].lower() \
1803                                     or u"across topologies" in \
1804                                     table[u"title"].lower():
1805                                 name = _tpc_modify_displayed_test_name(name)
1806                             col_data[u"data"][tst_name_mod] = {
1807                                 u"name": name,
1808                                 u"replace": False,
1809                                 u"data": list(),
1810                                 u"mean": None,
1811                                 u"stdev": None
1812                             }
1813                         if col_data[u"data"][tst_name_mod][u"replace"]:
1814                             col_data[u"data"][tst_name_mod][u"replace"] = False
1815                             col_data[u"data"][tst_name_mod][u"data"] = list()
1816                         _tpc_insert_data(
1817                             target=col_data[u"data"][tst_name_mod],
1818                             src=tst_data,
1819                             include_tests=table[u"include-tests"]
1820                         )
1821
1822         if table[u"include-tests"] in (u"NDR", u"PDR") or \
1823                 u"latency" in table[u"include-tests"]:
1824             for tst_name, tst_data in col_data[u"data"].items():
1825                 if tst_data[u"data"]:
1826                     tst_data[u"mean"] = mean(tst_data[u"data"])
1827                     tst_data[u"stdev"] = stdev(tst_data[u"data"])
1828
1829         cols.append(col_data)
1830
1831     tbl_dict = dict()
1832     for col in cols:
1833         for tst_name, tst_data in col[u"data"].items():
1834             if tbl_dict.get(tst_name, None) is None:
1835                 tbl_dict[tst_name] = {
1836                     "name": tst_data[u"name"]
1837                 }
1838             tbl_dict[tst_name][col[u"title"]] = {
1839                 u"mean": tst_data[u"mean"],
1840                 u"stdev": tst_data[u"stdev"]
1841             }
1842
1843     if not tbl_dict:
1844         logging.warning(f"No data for table {table.get(u'title', u'')}!")
1845         return
1846
1847     tbl_lst = list()
1848     for tst_data in tbl_dict.values():
1849         row = [tst_data[u"name"], ]
1850         for col in cols:
1851             row.append(tst_data.get(col[u"title"], None))
1852         tbl_lst.append(row)
1853
1854     comparisons = table.get(u"comparisons", None)
1855     rcas = list()
1856     if comparisons and isinstance(comparisons, list):
1857         for idx, comp in enumerate(comparisons):
1858             try:
1859                 col_ref = int(comp[u"reference"])
1860                 col_cmp = int(comp[u"compare"])
1861             except KeyError:
1862                 logging.warning(u"Comparison: No references defined! Skipping.")
1863                 comparisons.pop(idx)
1864                 continue
1865             if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
1866                     col_ref == col_cmp):
1867                 logging.warning(f"Wrong values of reference={col_ref} "
1868                                 f"and/or compare={col_cmp}. Skipping.")
1869                 comparisons.pop(idx)
1870                 continue
1871             rca_file_name = comp.get(u"rca-file", None)
1872             if rca_file_name:
1873                 try:
1874                     with open(rca_file_name, u"r") as file_handler:
1875                         rcas.append(
1876                             {
1877                                 u"title": f"RCA{idx + 1}",
1878                                 u"data": load(file_handler, Loader=FullLoader)
1879                             }
1880                         )
1881                 except (YAMLError, IOError) as err:
1882                     logging.warning(
1883                         f"The RCA file {rca_file_name} does not exist or "
1884                         f"it is corrupted!"
1885                     )
1886                     logging.debug(repr(err))
1887                     rcas.append(None)
1888             else:
1889                 rcas.append(None)
1890     else:
1891         comparisons = None
1892
1893     tbl_cmp_lst = list()
1894     if comparisons:
1895         for row in tbl_lst:
1896             new_row = deepcopy(row)
1897             for comp in comparisons:
1898                 ref_itm = row[int(comp[u"reference"])]
1899                 if ref_itm is None and \
1900                         comp.get(u"reference-alt", None) is not None:
1901                     ref_itm = row[int(comp[u"reference-alt"])]
1902                 cmp_itm = row[int(comp[u"compare"])]
1903                 if ref_itm is not None and cmp_itm is not None and \
1904                         ref_itm[u"mean"] is not None and \
1905                         cmp_itm[u"mean"] is not None and \
1906                         ref_itm[u"stdev"] is not None and \
1907                         cmp_itm[u"stdev"] is not None:
1908                     try:
1909                         delta, d_stdev = relative_change_stdev(
1910                             ref_itm[u"mean"], cmp_itm[u"mean"],
1911                             ref_itm[u"stdev"], cmp_itm[u"stdev"]
1912                         )
1913                     except ZeroDivisionError:
1914                         break
1915                     if delta is None or math.isnan(delta):
1916                         break
1917                     new_row.append({
1918                         u"mean": delta * 1e6,
1919                         u"stdev": d_stdev * 1e6
1920                     })
1921                 else:
1922                     break
1923             else:
1924                 tbl_cmp_lst.append(new_row)
1925
1926     try:
1927         tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1928         tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1929     except TypeError as err:
1930         logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
1931
1932     tbl_for_csv = list()
1933     for line in tbl_cmp_lst:
1934         row = [line[0], ]
1935         for idx, itm in enumerate(line[1:]):
1936             if itm is None or not isinstance(itm, dict) or\
1937                     itm.get(u'mean', None) is None or \
1938                     itm.get(u'stdev', None) is None:
1939                 row.append(u"NT")
1940                 row.append(u"NT")
1941             else:
1942                 row.append(round(float(itm[u'mean']) / 1e6, 3))
1943                 row.append(round(float(itm[u'stdev']) / 1e6, 3))
1944         for rca in rcas:
1945             if rca is None:
1946                 continue
1947             rca_nr = rca[u"data"].get(row[0], u"-")
1948             row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1949         tbl_for_csv.append(row)
1950
1951     header_csv = [u"Test Case", ]
1952     for col in cols:
1953         header_csv.append(f"Avg({col[u'title']})")
1954         header_csv.append(f"Stdev({col[u'title']})")
1955     for comp in comparisons:
1956         header_csv.append(
1957             f"Avg({comp.get(u'title', u'')})"
1958         )
1959         header_csv.append(
1960             f"Stdev({comp.get(u'title', u'')})"
1961         )
1962     for rca in rcas:
1963         if rca:
1964             header_csv.append(rca[u"title"])
1965
1966     legend_lst = table.get(u"legend", None)
1967     if legend_lst is None:
1968         legend = u""
1969     else:
1970         legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1971
1972     footnote = u""
1973     if rcas and any(rcas):
1974         footnote += u"\nRoot Cause Analysis:\n"
1975         for rca in rcas:
1976             if rca:
1977                 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
1978
1979     csv_file_name = f"{table[u'output-file']}-csv.csv"
1980     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1981         file_handler.write(
1982             u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1983         )
1984         for test in tbl_for_csv:
1985             file_handler.write(
1986                 u",".join([f'"{item}"' for item in test]) + u"\n"
1987             )
1988         if legend_lst:
1989             for item in legend_lst:
1990                 file_handler.write(f'"{item}"\n')
1991         if footnote:
1992             for itm in footnote.split(u"\n"):
1993                 file_handler.write(f'"{itm}"\n')
1994
1995     tbl_tmp = list()
1996     max_lens = [0, ] * len(tbl_cmp_lst[0])
1997     for line in tbl_cmp_lst:
1998         row = [line[0], ]
1999         for idx, itm in enumerate(line[1:]):
2000             if itm is None or not isinstance(itm, dict) or \
2001                     itm.get(u'mean', None) is None or \
2002                     itm.get(u'stdev', None) is None:
2003                 new_itm = u"NT"
2004             else:
2005                 if idx < len(cols):
2006                     new_itm = (
2007                         f"{round(float(itm[u'mean']) / 1e6, 1)} "
2008                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
2009                         replace(u"nan", u"NaN")
2010                     )
2011                 else:
2012                     new_itm = (
2013                         f"{round(float(itm[u'mean']) / 1e6, 1):+} "
2014                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
2015                         replace(u"nan", u"NaN")
2016                     )
2017             if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
2018                 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
2019             row.append(new_itm)
2020
2021         tbl_tmp.append(row)
2022
2023     header = [u"Test Case", ]
2024     header.extend([col[u"title"] for col in cols])
2025     header.extend([comp.get(u"title", u"") for comp in comparisons])
2026
2027     tbl_final = list()
2028     for line in tbl_tmp:
2029         row = [line[0], ]
2030         for idx, itm in enumerate(line[1:]):
2031             if itm in (u"NT", u"NaN"):
2032                 row.append(itm)
2033                 continue
2034             itm_lst = itm.rsplit(u"\u00B1", 1)
2035             itm_lst[-1] = \
2036                 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
2037             itm_str = u"\u00B1".join(itm_lst)
2038
2039             if idx >= len(cols):
2040                 # Diffs
2041                 rca = rcas[idx - len(cols)]
2042                 if rca:
2043                     # Add rcas to diffs
2044                     rca_nr = rca[u"data"].get(row[0], None)
2045                     if rca_nr:
2046                         hdr_len = len(header[idx + 1]) - 1
2047                         if hdr_len < 19:
2048                             hdr_len = 19
2049                         rca_nr = f"[{rca_nr}]"
2050                         itm_str = (
2051                             f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
2052                             f"{u' ' * (hdr_len - 4 - len(itm_str))}"
2053                             f"{itm_str}"
2054                         )
2055             row.append(itm_str)
2056         tbl_final.append(row)
2057
2058     # Generate csv tables:
2059     csv_file_name = f"{table[u'output-file']}.csv"
2060     logging.info(f"    Writing the file {csv_file_name}")
2061     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2062         file_handler.write(u";".join(header) + u"\n")
2063         for test in tbl_final:
2064             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2065
2066     # Generate txt table:
2067     txt_file_name = f"{table[u'output-file']}.txt"
2068     logging.info(f"    Writing the file {txt_file_name}")
2069     convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
2070
2071     with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
2072         file_handler.write(legend)
2073         file_handler.write(footnote)
2074
2075     # Generate html table:
2076     _tpc_generate_html_table(
2077         header,
2078         tbl_final,
2079         table[u'output-file'],
2080         legend=legend,
2081         footnote=footnote,
2082         sort_data=False,
2083         title=table.get(u"title", u"")
2084     )
2085
2086
2087 def table_weekly_comparison(table, in_data):
2088     """Generate the table(s) with algorithm: table_weekly_comparison
2089     specified in the specification file.
2090
2091     :param table: Table to generate.
2092     :param in_data: Data to process.
2093     :type table: pandas.Series
2094     :type in_data: InputData
2095     """
2096     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2097
2098     # Transform the data
2099     logging.info(
2100         f"    Creating the data set for the {table.get(u'type', u'')} "
2101         f"{table.get(u'title', u'')}."
2102     )
2103
2104     incl_tests = table.get(u"include-tests", None)
2105     if incl_tests not in (u"NDR", u"PDR"):
2106         logging.error(f"Wrong tests to include specified ({incl_tests}).")
2107         return
2108
2109     nr_cols = table.get(u"nr-of-data-columns", None)
2110     if not nr_cols or nr_cols < 2:
2111         logging.error(
2112             f"No columns specified for {table.get(u'title', u'')}. Skipping."
2113         )
2114         return
2115
2116     data = in_data.filter_data(
2117         table,
2118         params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2119         continue_on_error=True
2120     )
2121
2122     header = [
2123         [u"VPP Version", ],
2124         [u"Start Timestamp", ],
2125         [u"CSIT Build", ],
2126         [u"CSIT Testbed", ]
2127     ]
2128     tbl_dict = dict()
2129     idx = 0
2130     tb_tbl = table.get(u"testbeds", None)
2131     for job_name, job_data in data.items():
2132         for build_nr, build in job_data.items():
2133             if idx >= nr_cols:
2134                 break
2135             if build.empty:
2136                 continue
2137
2138             tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2139             if tb_ip and tb_tbl:
2140                 testbed = tb_tbl.get(tb_ip, u"")
2141             else:
2142                 testbed = u""
2143             header[2].insert(1, build_nr)
2144             header[3].insert(1, testbed)
2145             header[1].insert(
2146                 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2147             )
2148             header[0].insert(
2149                 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2150             )
2151
2152             for tst_name, tst_data in build.items():
2153                 tst_name_mod = \
2154                     _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2155                 if not tbl_dict.get(tst_name_mod, None):
2156                     tbl_dict[tst_name_mod] = dict(
2157                         name=tst_data[u'name'].rsplit(u'-', 1)[0],
2158                     )
2159                 try:
2160                     tbl_dict[tst_name_mod][-idx - 1] = \
2161                         tst_data[u"throughput"][incl_tests][u"LOWER"]
2162                 except (TypeError, IndexError, KeyError, ValueError):
2163                     pass
2164             idx += 1
2165
2166     if idx < nr_cols:
2167         logging.error(u"Not enough data to build the table! Skipping")
2168         return
2169
2170     cmp_dict = dict()
2171     for idx, cmp in enumerate(table.get(u"comparisons", list())):
2172         idx_ref = cmp.get(u"reference", None)
2173         idx_cmp = cmp.get(u"compare", None)
2174         if idx_ref is None or idx_cmp is None:
2175             continue
2176         header[0].append(
2177             f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2178             f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2179         )
2180         header[1].append(u"")
2181         header[2].append(u"")
2182         header[3].append(u"")
2183         for tst_name, tst_data in tbl_dict.items():
2184             if not cmp_dict.get(tst_name, None):
2185                 cmp_dict[tst_name] = list()
2186             ref_data = tst_data.get(idx_ref, None)
2187             cmp_data = tst_data.get(idx_cmp, None)
2188             if ref_data is None or cmp_data is None:
2189                 cmp_dict[tst_name].append(float(u'nan'))
2190             else:
2191                 cmp_dict[tst_name].append(
2192                     relative_change(ref_data, cmp_data)
2193                 )
2194
2195     tbl_lst_none = list()
2196     tbl_lst = list()
2197     for tst_name, tst_data in tbl_dict.items():
2198         itm_lst = [tst_data[u"name"], ]
2199         for idx in range(nr_cols):
2200             item = tst_data.get(-idx - 1, None)
2201             if item is None:
2202                 itm_lst.insert(1, None)
2203             else:
2204                 itm_lst.insert(1, round(item / 1e6, 1))
2205         itm_lst.extend(
2206             [
2207                 None if itm is None else round(itm, 1)
2208                 for itm in cmp_dict[tst_name]
2209             ]
2210         )
2211         if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2212             tbl_lst_none.append(itm_lst)
2213         else:
2214             tbl_lst.append(itm_lst)
2215
2216     tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2217     tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2218     tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2219     tbl_lst.extend(tbl_lst_none)
2220
2221     # Generate csv table:
2222     csv_file_name = f"{table[u'output-file']}.csv"
2223     logging.info(f"    Writing the file {csv_file_name}")
2224     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2225         for hdr in header:
2226             file_handler.write(u",".join(hdr) + u"\n")
2227         for test in tbl_lst:
2228             file_handler.write(u",".join(
2229                 [
2230                     str(item).replace(u"None", u"-").replace(u"nan", u"-").
2231                     replace(u"null", u"-") for item in test
2232                 ]
2233             ) + u"\n")
2234
2235     txt_file_name = f"{table[u'output-file']}.txt"
2236     logging.info(f"    Writing the file {txt_file_name}")
2237     convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2238
2239     # Reorganize header in txt table
2240     txt_table = list()
2241     with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2242         for line in list(file_handler):
2243             txt_table.append(line)
2244     try:
2245         txt_table.insert(5, txt_table.pop(2))
2246         with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2247             file_handler.writelines(txt_table)
2248     except IndexError:
2249         pass
2250
2251     # Generate html table:
2252     hdr_html = [
2253         u"<br>".join(row) for row in zip(*header)
2254     ]
2255     _tpc_generate_html_table(
2256         hdr_html,
2257         tbl_lst,
2258         table[u'output-file'],
2259         sort_data=True,
2260         title=table.get(u"title", u""),
2261         generate_rst=False
2262     )