28f42ec0899993d98a2e37dd46f7fcfbafe5aae4
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import math
21 import re
22
23 from collections import OrderedDict
24 from xml.etree import ElementTree as ET
25 from datetime import datetime as dt
26 from datetime import timedelta
27 from copy import deepcopy
28
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
31 import pandas as pd
32 import prettytable
33
34 from numpy import nan, isnan
35 from yaml import load, FullLoader, YAMLError
36
37 from pal_utils import mean, stdev, classify_anomalies, \
38     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
39
40
41 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
42
43
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name from the specification --> the function
    # implementing it.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison,
        u"table_job_spec_duration": table_job_spec_duration
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            if table[u"algorithm"] == u"table_weekly_comparison":
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            # An unknown algorithm raises KeyError on the dict lookup, not
            # NameError, so both must be caught here for the error message
            # below to ever be emitted.
            generator[table[u"algorithm"]](table, data)
        except (KeyError, NameError) as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
79
80
def table_job_spec_duration(table, input_data):
    """Generate the table(s) with algorithm: table_job_spec_duration
    specified in the specification file.

    Writes a prettytable-formatted txt file with one row per job
    specification, listing the number of runs and the mean (+- stdev)
    duration in HH:MM.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    jb_type = table.get(u"jb-type", None)

    tbl_lst = list()
    if jb_type == u"iterative":
        # Iterative jobs: collect the duration of every listed build and
        # compute mean and standard deviation per job specification.
        for line in table.get(u"lines", tuple()):
            tbl_itm = {
                u"name": line.get(u"job-spec", u""),
                u"data": list()
            }
            for job, builds in line.get(u"data-set", dict()).items():
                for build_nr in builds:
                    try:
                        # "elapsedtime" is in milliseconds; convert to
                        # whole minutes.
                        minutes = input_data.metadata(
                            job, str(build_nr)
                        )[u"elapsedtime"] // 60000
                    except (KeyError, IndexError, ValueError, AttributeError):
                        continue
                    tbl_itm[u"data"].append(minutes)
            tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
            tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
            tbl_lst.append(tbl_itm)
    elif jb_type == u"coverage":
        # Coverage jobs: a single build per line, so stdev is undefined
        # (NaN) and the sample list holds just the one duration.
        job = table.get(u"data", None)
        if not job:
            return
        for line in table.get(u"lines", tuple()):
            try:
                tbl_itm = {
                    u"name": line.get(u"job-spec", u""),
                    u"mean": input_data.metadata(
                        list(job.keys())[0], str(line[u"build"])
                    )[u"elapsedtime"] // 60000,
                    u"stdev": float(u"nan")
                }
                tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
            except (KeyError, IndexError, ValueError, AttributeError):
                continue
            tbl_lst.append(tbl_itm)
    else:
        logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
        return

    # Format minutes as "HH:MM"; an undefined stdev becomes an empty string.
    for line in tbl_lst:
        line[u"mean"] = \
            f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
        if math.isnan(line[u"stdev"]):
            line[u"stdev"] = u""
        else:
            line[u"stdev"] = (
                f"{int(line[u'stdev'] // 60):02d}:"
                f"{int(line[u'stdev'] % 60):02d}"
            )

    if not tbl_lst:
        return

    rows = list()
    for itm in tbl_lst:
        rows.append([
            itm[u"name"],
            f"{len(itm[u'data'])}",
            f"{itm[u'mean']} +- {itm[u'stdev']}"
            if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
        ])

    txt_table = prettytable.PrettyTable(
        [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
    )
    for row in rows:
        txt_table.add_row(row)
    txt_table.align = u"r"
    txt_table.align[u"Job Specification"] = u"l"

    file_name = f"{table.get(u'output-file', u'')}.txt"
    with open(file_name, u"wt", encoding='utf-8') as txt_file:
        txt_file.write(str(txt_table))
169
170
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    For each suite, an rst file is written containing embedded HTML tables
    of operational data (the "telemetry-show-run" structure), one HTML
    table per test.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"telemetry-show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optionally sort the tests by their index.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Background colors: table header rows, empty spacer rows, and the
        # two alternating body-row shades.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Header row with the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # Telemetry missing, or present as a plain string (presumably an
        # error message rather than the parsed structure - TODO confirm):
        # emit a "No Data" table and return early.
        if tst_data.get(u"telemetry-show-run", None) is None or \
                isinstance(tst_data[u"telemetry-show-run"], str):
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            # White-on-white dot: invisible content used as a spacer.
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT present in the telemetry data.
        for dut_data in tst_data[u"telemetry-show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"runtime", None) is None:
                tcol.text = u"No Data"
                continue

            # Re-group the flat list of telemetry items into
            # runtime[thread_id][graph_node][metric_name] = value,
            # converting values to float where possible.
            runtime = dict()
            for item in dut_data[u"runtime"].get(u"data", tuple()):
                tid = int(item[u"labels"][u"thread_id"])
                if runtime.get(tid, None) is None:
                    runtime[tid] = dict()
                gnode = item[u"labels"][u"graph_node"]
                if runtime[tid].get(gnode, None) is None:
                    runtime[tid][gnode] = dict()
                try:
                    runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
                except ValueError:
                    runtime[tid][gnode][item[u"name"]] = item[u"value"]

            # Compute per-graph-node rows for each thread. Clocks are
            # normalised by vectors, falling back to calls, then suspends.
            # Nodes with zero calls, vectors and suspends are skipped.
            threads = dict({idx: list() for idx in range(len(runtime))})
            for idx, run_data in runtime.items():
                for gnode, gdata in run_data.items():
                    if gdata[u"vectors"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"vectors"]
                    elif gdata[u"calls"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"calls"]
                    elif gdata[u"suspends"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"suspends"]
                    else:
                        clocks = 0.0
                    if gdata[u"calls"] > 0:
                        vectors_call = gdata[u"vectors"] / gdata[u"calls"]
                    else:
                        vectors_call = 0.0
                    if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
                            int(gdata[u"suspends"]):
                        threads[idx].append([
                            gnode,
                            int(gdata[u"calls"]),
                            int(gdata[u"vectors"]),
                            int(gdata[u"suspends"]),
                            clocks,
                            vectors_call
                        ])

            # DUT section header with host/socket identification.
            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            # One sub-table per thread: title row, column headers, data rows,
            # then a spacer row.
            for thread_nr, thread in threads.items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                # Thread 0 is labelled "main", others "worker_<n>".
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    # First column left-aligned, the rest right-aligned.
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        # Floats (clocks, vectors/call) shown with two
                        # decimal places; everything else verbatim.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Trailing spacer row with an invisible (white-on-white) dot.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # One rst file per suite, concatenating the HTML tables of all tests
    # whose "parent" string occurs in the suite name.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            # "output-file" not present in the table specification.
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
413
414
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Writes one csv file per suite, with one row per passed test and one
    column per entry in table["columns"].

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optionally sort the tests by their index.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; embedded double quotes are
    # csv-escaped by doubling them.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            # Only passed tests whose "parent" string occurs in the suite
            # name are included.
            if data[test][u"status"] != u"PASS" or \
                    data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column["data"] is a two-word string; its second word
                    # is the key into the test data.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    # (this `continue` leaves row_lst short of one column, so
                    # the length check below drops the whole row).
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Break overly long test names roughly in half at a
                        # dash boundary for display.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                # Drop everything before the first newline.
                                col_data = col_data.split(u"\n", 1)[1]
                            except IndexError:
                                pass
                        col_data = col_data.replace(u'\n', u' |br| ').\
                            replace(u'\r', u'').replace(u'"', u"'")
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
                        col_data = col_data.replace(u'\n', u' |br| ')
                        # The last 5 characters are stripped (presumably a
                        # trailing "|br| " marker - TODO confirm).
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # Keep only complete rows (see the "Test Failed" skip above).
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
511
512
513 def _tpc_modify_test_name(test_name, ignore_nic=False):
514     """Modify a test name by replacing its parts.
515
516     :param test_name: Test name to be modified.
517     :param ignore_nic: If True, NIC is removed from TC name.
518     :type test_name: str
519     :type ignore_nic: bool
520     :returns: Modified test name.
521     :rtype: str
522     """
523     test_name_mod = test_name.\
524         replace(u"-ndrpdr", u"").\
525         replace(u"1t1c", u"1c").\
526         replace(u"2t1c", u"1c"). \
527         replace(u"2t2c", u"2c").\
528         replace(u"4t2c", u"2c"). \
529         replace(u"4t4c", u"4c").\
530         replace(u"8t4c", u"4c")
531
532     if ignore_nic:
533         return re.sub(REGEX_NIC, u"", test_name_mod)
534     return test_name_mod
535
536
537 def _tpc_modify_displayed_test_name(test_name):
538     """Modify a test name which is displayed in a table by replacing its parts.
539
540     :param test_name: Test name to be modified.
541     :type test_name: str
542     :returns: Modified test name.
543     :rtype: str
544     """
545     return test_name.\
546         replace(u"1t1c", u"1c").\
547         replace(u"2t1c", u"1c"). \
548         replace(u"2t2c", u"2c").\
549         replace(u"4t2c", u"2c"). \
550         replace(u"4t4c", u"4c").\
551         replace(u"8t4c", u"4c")
552
553
554 def _tpc_insert_data(target, src, include_tests):
555     """Insert src data to the target structure.
556
557     :param target: Target structure where the data is placed.
558     :param src: Source data to be placed into the target structure.
559     :param include_tests: Which results will be included (MRR, NDR, PDR).
560     :type target: list
561     :type src: dict
562     :type include_tests: str
563     """
564     try:
565         if include_tests == u"MRR":
566             target[u"mean"] = src[u"result"][u"receive-rate"]
567             target[u"stdev"] = src[u"result"][u"receive-stdev"]
568         elif include_tests == u"PDR":
569             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
570         elif include_tests == u"NDR":
571             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
572         elif u"latency" in include_tests:
573             keys = include_tests.split(u"-")
574             if len(keys) == 4:
575                 lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
576                 target[u"data"].append(
577                     float(u"nan") if lat == -1 else lat * 1e6
578                 )
579     except (KeyError, TypeError):
580         pass
581
582
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    # Index of the "Test Case" column (0 if not present); it also selects
    # the matching alignment/width preset below (assumes the index is 0, 1
    # or 2 - the presets have no entries beyond that - TODO confirm callers).
    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        # Pre-compute one sorted view per column (ascending then
        # descending), with the "Test Case" column as secondary sort key.
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    # Alternating row background colors.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        # One table trace per pre-sorted view; the dropdown menu below
        # switches which single trace is visible, emulating client-side
        # sorting in the static html.
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        # NOTE: `idx` is re-bound by this loop; the params presets above
        # were already consumed, so this is safe here.
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        # Single, unsorted table (df_sorted is the plain DataFrame here).
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    # Wrap the generated html in an rst file which embeds it via an iframe.
    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )

        if legend:
            try:
                # First line becomes a paragraph, the rest an rst bullet
                # list; leading/trailing characters are trimmed.
                itm_lst = legend[1:-2].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
        if footnote:
            try:
                itm_lst = footnote[1:].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")
769
770
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    For each test present in both data sets, the mean and standard
    deviation of the SOAK (compare) and NDR/PDR/MRR (reference) results
    are computed together with their relative difference, and the table
    is written as csv, pretty txt and html.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test Case",
            f"Avg({table[u'reference'][u'title']})",
            f"Stdev({table[u'reference'][u'title']})",
            f"Avg({table[u'compare'][u'title']})",
            # Bug fix: the opening parenthesis was missing here
            # (f"Stdev{...})"), making this column label inconsistent with
            # the other headers and with the legend below.
            f"Stdev({table[u'compare'][u'title']})",
            u"Diff",
            u"Stdev(Diff)"
        ]
        header_str = u";".join(header) + u"\n"
        legend = (
            u"\nLegend:\n"
            f"Avg({table[u'reference'][u'title']}): "
            f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
            f"from a series of runs of the listed tests.\n"
            f"Stdev({table[u'reference'][u'title']}): "
            f"Standard deviation value of {table[u'reference'][u'title']} "
            f"[Mpps] computed from a series of runs of the listed tests.\n"
            f"Avg({table[u'compare'][u'title']}): "
            f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
            f"a series of runs of the listed tests.\n"
            f"Stdev({table[u'compare'][u'title']}): "
            f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
            f"computed from a series of runs of the listed tests.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Stdev(Diff): "
            u"Standard deviation of percentage change calculated for mean "
            u"values."
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        # No throughput recorded for this run.
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    # MRR results carry (mean, stdev); NDR/PDR are scalars.
                    if table[u"include-tests"] == u"MRR":
                        result = (tst_data[u"result"][u"receive-rate"],
                                  tst_data[u"result"][u"receive-stdev"])
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # NOTE(review): rows missing either data set are built but never
        # appended (the append is inside this if), so they are silently
        # dropped from the table - confirm this is intended.
        if data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            # round() raises ValueError for non-finite values (presumably
            # NaN from degenerate inputs); keep the raw value then.
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
            tbl_lst.append(item)

    # Sort the table according to the relative change
    # NOTE(review): rel[-1] is the Stdev(Diff) column; the relative change
    # itself is rel[-2]. Sort key kept as-is to preserve output ordering -
    # confirm which column was intended.
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    with open(csv_file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(
        csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
    )
    # Append the legend to the pretty txt output.
    with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
        file_handler.write(legend)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_lst,
        table[u'output-file'],
        legend=legend,
        title=table.get(u"title", u"")
    )
949
950
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    For every test, samples from the listed builds are classified by
    classify_anomalies(); the last trend value, the short- and long-term
    relative changes and the counts of regressions/progressions within
    the evaluation window are written as csv and pretty txt.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Which measured quantity to trend; defaults to MRR receive rate.
    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data to the table:
    # tbl_dict maps test name -> display name and an ordered
    # build -> sample mapping (insertion order preserves build order).
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Tests whose parent suite does not identify a NIC are
                    # skipped entirely.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to talk about a trend.
        if len(data_t) < 2:
            continue

        try:
            classification_lst, avgs, _ = classify_anomalies(data_t)
        except ValueError as err:
            # NOTE(review): this aborts the whole table on the first
            # failing test, although the log message says "Skipping" -
            # confirm whether 'continue' was intended here.
            logging.info(f"{err} Skipping")
            return

        # Clamp both windows to the amount of data actually available.
        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        # Maximum trend value in the long-term window, excluding the
        # short-term window; max() on an empty list raises ValueError.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        # Trend value one short-term window ago (clamped to list start).
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change [%] vs the value a window ago.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        # Long-term change [%] vs the long-window maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            # Rows without any usable change value are left out.
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 rel_change_last,
                 rel_change_long,
                 # Anomaly counts within the short-term window.
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

    # Stable multi-key sort: name, then long-term, then short-term change.
    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[2])

    # Re-bucket: most regressions first, then most progressions.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1084
1085
def _generate_url(testbed, test_name):
    """Generate URL to a trending plot from the name of the test case.

    The URL is assembled from tokens found in the test name (NIC, frame
    size, cores, driver, feature set and domain). Every token chain below
    is evaluated in priority order - the first matching entry wins - so
    the order of the tables must not be changed.

    :param testbed: The testbed used for testing.
    :param test_name: The name of the test case.
    :type testbed: str
    :type test_name: str
    :returns: The URL to the plot with the trending data for the given test
        case.
    :rtype str
    """

    # NIC model; "ena" tests map to the "nitro50g" label.
    nic = u""
    for token, label in (
            (u"x520", u"x520"),
            (u"x710", u"x710"),
            (u"xl710", u"xl710"),
            (u"xxv710", u"xxv710"),
            (u"vic1227", u"vic1227"),
            (u"vic1385", u"vic1385"),
            (u"x553", u"x553"),
            (u"cx556", u"cx556a"),
            (u"ena", u"nitro50g")):
        if token in test_name:
            nic = label
            break

    # Frame size token, used verbatim.
    frame_size = u""
    for token in (u"64b", u"78b", u"imix", u"9000b", u"1518b", u"114b"):
        if token in test_name:
            frame_size = token
            break

    # Cores: either stated explicitly (e.g. "2t1c") or derived from the
    # "-Nc-" tag combined with the testbed family (one thread per core on
    # the first group, two threads per core on the second).
    st_testbeds = (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")
    mt_testbeds = (
        u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2", u"2n-aws", u"3n-aws"
    )
    cores = u""
    for token, tag, beds in (
            (u"1t1c", u"-1c-", st_testbeds),
            (u"2t2c", u"-2c-", st_testbeds),
            (u"4t4c", u"-4c-", st_testbeds),
            (u"2t1c", u"-1c-", mt_testbeds),
            (u"4t2c", u"-2c-", mt_testbeds),
            (u"8t4c", u"-4c-", mt_testbeds)):
        if token in test_name or (tag in test_name and testbed in beds):
            cores = token
            break

    # Driver: test-name tokens first; otherwise derived from the testbed
    # (dnv/tsh boards -> ixgbe), with "dpdk" as the fallback.
    for token, label in (
            (u"testpmd", u"testpmd"),
            (u"l3fwd", u"l3fwd"),
            (u"avf", u"avf"),
            (u"af-xdp", u"af_xdp"),
            (u"af_xdp", u"af_xdp"),
            (u"rdma", u"rdma")):
        if token in test_name:
            driver = label
            break
    else:
        if u"dnv" in testbed or u"tsh" in testbed:
            driver = u"ixgbe"
        elif u"ena" in test_name:
            driver = u"ena"
        else:
            driver = u"dpdk"

    # Feature set (anchor component). Each entry lists the substrings
    # which must ALL be present; first full match wins, default is "base".
    bsf = u"base"
    for tokens, label in (
            ((u"macip-iacl1s", ), u"features-macip-iacl1"),
            ((u"macip-iacl10s", ), u"features-macip-iacl10"),
            ((u"macip-iacl50s", ), u"features-macip-iacl50"),
            ((u"iacl1s", ), u"features-iacl1"),
            ((u"iacl10s", ), u"features-iacl10"),
            ((u"iacl50s", ), u"features-iacl50"),
            ((u"oacl1s", ), u"features-oacl1"),
            ((u"oacl10s", ), u"features-oacl10"),
            ((u"oacl50s", ), u"features-oacl50"),
            ((u"nat44det", ), u"nat44det-bidir"),
            ((u"nat44ed", u"udir"), u"nat44ed-udir"),
            ((u"-cps", u"ethip4udp"), u"udp-cps"),
            ((u"-cps", u"ethip4tcp"), u"tcp-cps"),
            ((u"-pps", u"ethip4udp"), u"udp-pps"),
            ((u"-pps", u"ethip4tcp"), u"tcp-pps"),
            ((u"-tput", u"ethip4udp"), u"udp-tput"),
            ((u"-tput", u"ethip4tcp"), u"tcp-tput"),
            ((u"udpsrcscale", ), u"features-udp"),
            ((u"iacl", ), u"features"),
            ((u"policer", ), u"features"),
            ((u"adl", ), u"features"),
            ((u"cop", ), u"features"),
            ((u"nat", ), u"features"),
            ((u"macip", ), u"features"),
            ((u"scale", ), u"scale")):
        if all(token in test_name for token in tokens):
            bsf = label
            break

    # Domain (file-name component). Some branches also adjust the driver
    # or the feature set computed above, so this must run last.
    if u"114b" in test_name and u"vhost" in test_name:
        domain = u"vts"
    elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
        domain = \
            u"nat44-det-bidir" if u"nat44det" in test_name else u"nat44-ed"
        if u"udir" in test_name:
            domain += u"-unidir"
        elif u"-ethip4udp-" in test_name:
            domain += u"-udp"
        elif u"-ethip4tcp-" in test_name:
            domain += u"-tcp"
        for suffix in (u"-cps", u"-pps", u"-tput"):
            if suffix in test_name:
                domain += suffix
                break
    elif u"testpmd" in test_name or u"l3fwd" in test_name:
        domain = u"dpdk"
    elif u"memif" in test_name:
        domain = u"container_memif"
    elif u"srv6" in test_name:
        domain = u"srv6"
    elif u"vhost" in test_name:
        domain = u"vhost"
        driver += u"-vpp" if u"vppl2xc" in test_name else u"-testpmd"
        if u"lbvpplacp" in test_name:
            bsf += u"-link-bonding"
    elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
        domain = u"nf_service_density_vnfc"
    elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfc"
    elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfp"
    elif u"ipsec" in test_name:
        domain = u"ipsec"
        if u"sw" in test_name:
            bsf += u"-sw"
        elif u"hw" in test_name:
            bsf += u"-hw"
    elif u"ethip4vxlan" in test_name or u"ethip4udpgeneve" in test_name:
        domain = u"ip4_tunnels"
    elif u"ip4base" in test_name or u"ip4scale" in test_name:
        domain = u"ip4"
    elif u"ip6base" in test_name or u"ip6scale" in test_name:
        domain = u"ip6"
    elif any(token in test_name for token in (
            u"l2xcbase", u"l2xcscale", u"l2bdbasemaclrn", u"l2bdscale",
            u"l2patch")):
        domain = u"l2"
    else:
        domain = u""

    file_name = u"-".join((domain, testbed, nic)) + u".html#"
    anchor_name = u"-".join((frame_size, cores, bsf, driver))

    return file_name + anchor_name
1303
1304
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the csv dashboard produced earlier and renders it as an html
    table embedded in a ``.. raw:: html`` rst directive. Rows are shaded
    by classification (regression / progression / normal) and the test
    names are optionally turned into links to the trending plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # NDR/PDR dashboards link into the ndrpdr trending pages with a
    # per-type anchor suffix; MRR links into the plain trending pages.
    if test_type in (u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = f"-{test_type.lower()}"
    else:
        lnk_dir = u"../trending/"
        lnk_sufix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except FileNotFoundError as err:
        logging.warning(f"{err}")
        return
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table root:
    root = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row (first csv line):
    hdr_row = ET.SubElement(root, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, title in enumerate(csv_lst[0]):
        cell = ET.SubElement(
            hdr_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        cell.text = title

    # Data rows; two alternating shades per classification:
    palette = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for row_nr, line in enumerate(csv_lst[1:]):
        # Column 4 holds the regression count, column 5 the progressions.
        if int(line[4]):
            state = u"regression"
        elif int(line[5]):
            state = u"progression"
        else:
            state = u"normal"
        data_row = ET.SubElement(
            root, u"tr", attrib=dict(bgcolor=palette[state][row_nr % 2])
        )

        for col_nr, value in enumerate(line):
            cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            # The first column (test name) becomes a link unless disabled:
            if col_nr == 0 and table.get(u"add-links", True):
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), value)}"
                        f"{lnk_sufix}"
                    )
                )
                link.text = value
            else:
                cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            # tostring(..., encoding="unicode") already returns str.
            html_file.write(ET.tostring(root, encoding=u"unicode"))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1424
1425
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For every requested build the output lists the build number, the
    version, pass/fail counts, the elapsed time and one line per failed
    test (nic-name###sanitized message).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    out_lines = list()
    for job, builds in table[u"data"].items():
        for build_nr in builds:
            build_nr = str(build_nr)
            # Metadata must exist for every requested build; otherwise the
            # whole table is abandoned.
            try:
                meta = input_data.metadata(job, build_nr)
                version = meta.get(u"version", u"")
                duration = meta.get(u"elapsedtime", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build_nr} is not present.")
                return
            out_lines.append(build_nr)
            out_lines.append(version)
            failed_tests = list()
            nr_passed = 0
            nr_failed = 0
            for tst_data in data[job][build_nr].values:
                if tst_data[u"status"] != u"FAIL":
                    nr_passed += 1
                    continue
                nr_failed += 1
                # Failed tests without an identifiable NIC are counted but
                # not listed.
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                if not groups:
                    continue
                nic = groups.group(0)
                # Strip newlines, anonymize IPv4 addresses and drop any
                # appended teardown-failure text from the message.
                msg = tst_data[u'msg'].replace(u"\n", u"")
                msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
                             'xxx.xxx.xxx.xxx', msg)
                msg = msg.split(u'Also teardown failed')[0]
                failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
            out_lines.extend((nr_passed, nr_failed, duration))
            out_lines.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(f"{item}\n" for item in out_lines)
1493
1494
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Counts, per test, the failures within the last "window" days and
    records the date, VPP build and CSIT build of the last failure; the
    result is written as csv and pretty txt, most-failing tests first.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Only used to pick the CSIT job name in the last column
    # (mrr-daily vs ndrpdr-weekly).
    test_type = u"MRR"
    if u"NDRPDR" in table.get(u"filter", list()):
        test_type = u"NDRPDR"

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the last "window" days (default 7)
    # are taken into account.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Tests whose parent suite does not identify a NIC
                    # are skipped entirely.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        # (status, date, vpp version, csit build)
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        # The last FAIL in iteration order wins; presumably builds are
        # listed chronologically so this is the most recent failure -
        # TODO confirm against the specification ordering.
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append([
                tst_data[u"name"],
                fails_nr,
                fails_last_date,
                fails_last_vpp,
                f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
                f"-build-{fails_last_csit}"
            ])

    # Newest failures first ("%Y%m%d %H:%M" sorts correctly as a string),
    # then bucketed by the number of failures, descending.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1605
1606
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    # A testbed must be specified, otherwise the links cannot be generated.
    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
        logging.error(
            f"Test type {test_type} is not defined. "
            f"Skipping."
        )
        return

    # Throughput tests link to the ndrpdr trending graphs (PDR anchor),
    # MRR tests link to the plain trending graphs.
    if test_type == u"MRR":
        lnk_dir = u"../trending/"
        lnk_sufix = u""
    else:
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = u"-pdr"

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Root element of the generated HTML table:
    tbl_elem = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row; first column is left-aligned, the rest centered:
    hdr_row = ET.SubElement(tbl_elem, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, title in enumerate(csv_rows[0]):
        cell = ET.SubElement(
            hdr_row,
            u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        cell.text = title

    # Data rows with alternating background colors:
    for row_nr, csv_row in enumerate(csv_rows[1:]):
        data_row = ET.SubElement(
            tbl_elem,
            u"tr",
            attrib=dict(bgcolor=(u"#e9f1fb", u"#d4e4f7")[row_nr % 2])
        )
        for col_nr, value in enumerate(csv_row):
            cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0 and table.get(u"add-links", True):
                # Turn the test name into a link to its trending graph.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), value)}"
                        f"{lnk_sufix}"
                    )
                )
                link.text = value
            else:
                cell.text = value

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(tbl_elem, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1704
1705
def table_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_comparison
    specified in the specification file.

    Builds one data column per entry in the ``columns`` specification
    (optionally overridden by ``data-replacement``), computes the relative
    differences for every entry in ``comparisons`` and writes the results
    as csv, txt and html tables.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    columns = table.get(u"columns", None)
    if not columns:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    # Collect data for each specified column:
    cols = list()
    for idx, col in enumerate(columns):
        if col.get(u"data-set", None) is None:
            logging.warning(f"No data for column {col.get(u'title', u'')}")
            continue
        tag = col.get(u"tag", None)
        data = input_data.filter_data(
            table,
            params=[
                u"throughput",
                u"result",
                u"latency",
                u"name",
                u"parent",
                u"tags"
            ],
            data=col[u"data-set"],
            continue_on_error=True
        )
        col_data = {
            u"title": col.get(u"title", f"Column{idx}"),
            u"data": dict()
        }
        for builds in data.values:
            for build in builds:
                for tst_name, tst_data in build.items():
                    if tag and tag not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = \
                        _tpc_modify_test_name(tst_name, ignore_nic=True).\
                        replace(u"2n1l-", u"")
                    if col_data[u"data"].get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        col_data[u"data"][tst_name_mod] = {
                            u"name": name,
                            # "replace" marks samples that may be overridden
                            # by the data-replacement set below.
                            u"replace": True,
                            u"data": list(),
                            u"mean": None,
                            u"stdev": None
                        }
                    _tpc_insert_data(
                        target=col_data[u"data"][tst_name_mod],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

        # Optional replacement data set; tests present here override the
        # samples collected above.
        replacement = col.get(u"data-replacement", None)
        if replacement:
            rpl_data = input_data.filter_data(
                table,
                params=[
                    u"throughput",
                    u"result",
                    u"latency",
                    u"name",
                    u"parent",
                    u"tags"
                ],
                data=replacement,
                continue_on_error=True
            )
            for builds in rpl_data.values:
                for build in builds:
                    for tst_name, tst_data in build.items():
                        if tag and tag not in tst_data[u"tags"]:
                            continue
                        tst_name_mod = \
                            _tpc_modify_test_name(tst_name, ignore_nic=True).\
                            replace(u"2n1l-", u"")
                        if col_data[u"data"].get(tst_name_mod, None) is None:
                            name = tst_data[u'name'].rsplit(u'-', 1)[0]
                            if u"across testbeds" in table[u"title"].lower() \
                                    or u"across topologies" in \
                                    table[u"title"].lower():
                                name = _tpc_modify_displayed_test_name(name)
                            col_data[u"data"][tst_name_mod] = {
                                u"name": name,
                                u"replace": False,
                                u"data": list(),
                                u"mean": None,
                                u"stdev": None
                            }
                        if col_data[u"data"][tst_name_mod][u"replace"]:
                            # First replacement sample for this test: drop
                            # the originally collected data.
                            col_data[u"data"][tst_name_mod][u"replace"] = False
                            col_data[u"data"][tst_name_mod][u"data"] = list()
                        _tpc_insert_data(
                            target=col_data[u"data"][tst_name_mod],
                            src=tst_data,
                            include_tests=table[u"include-tests"]
                        )

        if table[u"include-tests"] in (u"NDR", u"PDR") or \
                u"latency" in table[u"include-tests"]:
            for tst_name, tst_data in col_data[u"data"].items():
                if tst_data[u"data"]:
                    tst_data[u"mean"] = mean(tst_data[u"data"])
                    tst_data[u"stdev"] = stdev(tst_data[u"data"])

        cols.append(col_data)

    # Pivot: test name -> {column title: {mean, stdev}}
    tbl_dict = dict()
    for col in cols:
        for tst_name, tst_data in col[u"data"].items():
            if tbl_dict.get(tst_name, None) is None:
                tbl_dict[tst_name] = {
                    "name": tst_data[u"name"]
                }
            tbl_dict[tst_name][col[u"title"]] = {
                u"mean": tst_data[u"mean"],
                u"stdev": tst_data[u"stdev"]
            }

    if not tbl_dict:
        logging.warning(f"No data for table {table.get(u'title', u'')}!")
        return

    # One row per test: [name, col1-data, col2-data, ...]
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        row = [tst_data[u"name"], ]
        for col in cols:
            row.append(tst_data.get(col[u"title"], None))
        tbl_lst.append(row)

    # Validate the comparisons and load the RCA files. The original code
    # popped invalid items from ``comparisons`` while iterating over it
    # with enumerate(), which skips the element following each removed one
    # and mis-aligns ``rcas`` with the remaining comparisons. Build a
    # filtered list instead.
    comparisons = table.get(u"comparisons", None)
    rcas = list()
    if comparisons and isinstance(comparisons, list):
        valid_comparisons = list()
        for idx, comp in enumerate(comparisons):
            try:
                col_ref = int(comp[u"reference"])
                col_cmp = int(comp[u"compare"])
            except KeyError:
                logging.warning(u"Comparison: No references defined! Skipping.")
                continue
            # NOTE(review): due to operator precedence this accepts
            # col_ref == col_cmp even when both are out of range, which
            # would raise IndexError later on row[...]; presumably the
            # intent was "and col_ref != col_cmp". Kept as-is to preserve
            # behaviour — TODO confirm.
            if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
                    col_ref == col_cmp):
                logging.warning(f"Wrong values of reference={col_ref} "
                                f"and/or compare={col_cmp}. Skipping.")
                continue
            valid_comparisons.append(comp)
            # rcas stays parallel to the valid comparisons; the RCA title
            # keeps the original (1-based) position in the specification.
            rca_file_name = comp.get(u"rca-file", None)
            if rca_file_name:
                try:
                    with open(rca_file_name, u"r") as file_handler:
                        rca_data = load(file_handler, Loader=FullLoader)
                        rcas.append(
                            {
                                u"title": f"RCA{idx + 1}",
                                # An empty YAML file loads as None;
                                # normalise so later .get() calls work.
                                u"data": rca_data if rca_data else dict()
                            }
                        )
                except (YAMLError, IOError) as err:
                    logging.warning(
                        f"The RCA file {rca_file_name} does not exist or "
                        f"it is corrupted!"
                    )
                    logging.debug(repr(err))
                    rcas.append(None)
            else:
                rcas.append(None)
        comparisons = valid_comparisons
    else:
        # Normalise to an empty list so the loops below (header_csv,
        # header) never iterate over None (was a TypeError).
        comparisons = list()

    # Compute the relative differences; a row is kept only if every
    # comparison can be computed for it (note the for/else).
    tbl_cmp_lst = list()
    if comparisons:
        for row in tbl_lst:
            new_row = deepcopy(row)
            for comp in comparisons:
                ref_itm = row[int(comp[u"reference"])]
                if ref_itm is None and \
                        comp.get(u"reference-alt", None) is not None:
                    ref_itm = row[int(comp[u"reference-alt"])]
                cmp_itm = row[int(comp[u"compare"])]
                if ref_itm is not None and cmp_itm is not None and \
                        ref_itm[u"mean"] is not None and \
                        cmp_itm[u"mean"] is not None and \
                        ref_itm[u"stdev"] is not None and \
                        cmp_itm[u"stdev"] is not None:
                    try:
                        delta, d_stdev = relative_change_stdev(
                            ref_itm[u"mean"], cmp_itm[u"mean"],
                            ref_itm[u"stdev"], cmp_itm[u"stdev"]
                        )
                    except ZeroDivisionError:
                        break
                    if delta is None or math.isnan(delta):
                        break
                    new_row.append({
                        u"mean": delta * 1e6,
                        u"stdev": d_stdev * 1e6
                    })
                else:
                    break
            else:
                tbl_cmp_lst.append(new_row)

    # Sort by name, then by the last comparison's mean (descending).
    try:
        tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
        tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
    except TypeError as err:
        logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")

    # Raw csv output (values scaled back from the 1e6 fixed-point form):
    tbl_for_csv = list()
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or\
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                row.append(u"NT")
                row.append(u"NT")
            else:
                row.append(round(float(itm[u'mean']) / 1e6, 3))
                row.append(round(float(itm[u'stdev']) / 1e6, 3))
        for rca in rcas:
            if rca is None:
                continue
            rca_nr = rca[u"data"].get(row[0], u"-")
            row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        tbl_for_csv.append(row)

    header_csv = [u"Test Case", ]
    for col in cols:
        header_csv.append(f"Avg({col[u'title']})")
        header_csv.append(f"Stdev({col[u'title']})")
    for comp in comparisons:
        header_csv.append(
            f"Avg({comp.get(u'title', u'')})"
        )
        header_csv.append(
            f"Stdev({comp.get(u'title', u'')})"
        )
    for rca in rcas:
        if rca:
            header_csv.append(rca[u"title"])

    legend_lst = table.get(u"legend", None)
    if legend_lst is None:
        legend = u""
    else:
        legend = u"\n" + u"\n".join(legend_lst) + u"\n"

    footnote = u""
    if rcas and any(rcas):
        footnote += u"\nRoot Cause Analysis:\n"
        for rca in rcas:
            if rca:
                footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"

    csv_file_name = f"{table[u'output-file']}-csv.csv"
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(
            u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
        )
        for test in tbl_for_csv:
            file_handler.write(
                u",".join([f'"{item}"' for item in test]) + u"\n"
            )
        if legend_lst:
            for item in legend_lst:
                file_handler.write(f'"{item}"\n')
        if footnote:
            for itm in footnote.split(u"\n"):
                file_handler.write(f'"{itm}"\n')

    # Without any comparable rows the formatted tables cannot be built
    # (tbl_cmp_lst[0] below would raise IndexError).
    if not tbl_cmp_lst:
        logging.warning(
            f"No comparable data for table {table.get(u'title', u'')}!"
        )
        return

    # Pretty-printed cells: "mean ±stdev", right-aligned per column.
    tbl_tmp = list()
    max_lens = [0, ] * len(tbl_cmp_lst[0])
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or \
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                new_itm = u"NT"
            else:
                if idx < len(cols):
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 2)} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
                        replace(u"nan", u"NaN")
                    )
                else:
                    # Comparison columns carry an explicit sign.
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 2):+} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
                        replace(u"nan", u"NaN")
                    )
            if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
                max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
            row.append(new_itm)

        tbl_tmp.append(row)

    header = [u"Test Case", ]
    header.extend([col[u"title"] for col in cols])
    header.extend([comp.get(u"title", u"") for comp in comparisons])

    # Pad the stdev part to the column width and prepend RCA tags to the
    # comparison (diff) columns.
    tbl_final = list()
    for line in tbl_tmp:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm in (u"NT", u"NaN"):
                row.append(itm)
                continue
            itm_lst = itm.rsplit(u"\u00B1", 1)
            itm_lst[-1] = \
                f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
            itm_str = u"\u00B1".join(itm_lst)

            if idx >= len(cols):
                # Diffs
                rca = rcas[idx - len(cols)]
                if rca:
                    # Add rcas to diffs
                    rca_nr = rca[u"data"].get(row[0], None)
                    if rca_nr:
                        hdr_len = len(header[idx + 1]) - 1
                        if hdr_len < 19:
                            hdr_len = 19
                        rca_nr = f"[{rca_nr}]"
                        itm_str = (
                            f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
                            f"{u' ' * (hdr_len - 4 - len(itm_str))}"
                            f"{itm_str}"
                        )
            row.append(itm_str)
        tbl_final.append(row)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(u";".join(header) + u"\n")
        for test in tbl_final:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    # Generate txt table:
    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")

    with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
        file_handler.write(legend)
        file_handler.write(footnote)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_final,
        table[u'output-file'],
        legend=legend,
        footnote=footnote,
        sort_data=False,
        title=table.get(u"title", u"")
    )
2087
2088
def table_weekly_comparison(table, in_data):
    """Generate the table(s) with algorithm: table_weekly_comparison
    specified in the specification file.

    Collects NDR or PDR throughput from up to ``nr-of-data-columns`` builds,
    computes the relative changes defined in ``comparisons`` and writes the
    result as csv, txt and html tables.

    :param table: Table to generate.
    :param in_data: Data to process.
    :type table: pandas.Series
    :type in_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    # Only NDR or PDR throughput can be compared by this algorithm.
    incl_tests = table.get(u"include-tests", None)
    if incl_tests not in (u"NDR", u"PDR"):
        logging.error(f"Wrong tests to include specified ({incl_tests}).")
        return

    # At least two data columns are needed to compute a comparison.
    nr_cols = table.get(u"nr-of-data-columns", None)
    if not nr_cols or nr_cols < 2:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    data = in_data.filter_data(
        table,
        params=[u"throughput", u"result", u"name", u"parent", u"tags"],
        continue_on_error=True
    )

    # Four header rows; per-build values are inserted at position 1, so
    # their final order is the reverse of the processing order.
    header = [
        [u"VPP Version", ],
        [u"Start Timestamp", ],
        [u"CSIT Build", ],
        [u"CSIT Testbed", ]
    ]
    tbl_dict = dict()
    idx = 0  # Number of data columns collected so far.
    tb_tbl = table.get(u"testbeds", None)
    for job_name, job_data in data.items():
        for build_nr, build in job_data.items():
            # NOTE(review): breaks only the inner loop; the outer loop keeps
            # iterating, but since idx is never reset no further columns are
            # added — confirm intended.
            if idx >= nr_cols:
                break
            if build.empty:
                continue

            # Map the testbed IP to its display name, if a mapping is given.
            tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            else:
                testbed = u""
            header[2].insert(1, build_nr)
            header[3].insert(1, testbed)
            header[1].insert(
                1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
            )
            header[0].insert(
                1, in_data.metadata(job_name, build_nr).get(u"version", u"")
            )

            for tst_name, tst_data in build.items():
                tst_name_mod = \
                    _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
                if not tbl_dict.get(tst_name_mod, None):
                    tbl_dict[tst_name_mod] = dict(
                        name=tst_data[u'name'].rsplit(u'-', 1)[0],
                    )
                # Results are keyed by negative column index: -1 is the
                # first processed build, -2 the second, and so on.
                try:
                    tbl_dict[tst_name_mod][-idx - 1] = \
                        tst_data[u"throughput"][incl_tests][u"LOWER"]
                except (TypeError, IndexError, KeyError, ValueError):
                    # Best effort: builds without this result simply leave
                    # a gap for the test.
                    pass
            idx += 1

    if idx < nr_cols:
        logging.error(u"Not enough data to build the table! Skipping")
        return

    # Compute one relative-change value per comparison and test; NaN marks
    # a comparison where one of the two values is missing.
    cmp_dict = dict()
    for idx, cmp in enumerate(table.get(u"comparisons", list())):
        idx_ref = cmp.get(u"reference", None)
        idx_cmp = cmp.get(u"compare", None)
        if idx_ref is None or idx_cmp is None:
            continue
        # The header index is offset by idx to compensate for the Diff()
        # titles appended in previous iterations; presumably idx_ref and
        # idx_cmp are negative indices — TODO confirm against the spec.
        header[0].append(
            f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
            f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
        )
        header[1].append(u"")
        header[2].append(u"")
        header[3].append(u"")
        for tst_name, tst_data in tbl_dict.items():
            if not cmp_dict.get(tst_name, None):
                cmp_dict[tst_name] = list()
            ref_data = tst_data.get(idx_ref, None)
            cmp_data = tst_data.get(idx_cmp, None)
            if ref_data is None or cmp_data is None:
                cmp_dict[tst_name].append(float(u'nan'))
            else:
                cmp_dict[tst_name].append(
                    relative_change(ref_data, cmp_data)
                )

    # Build the rows. Throughput is converted to Mpps (value / 1e6); values
    # are inserted at position 1, mirroring the reversed header order.
    # Rows whose last comparison is missing are collected separately and
    # appended at the bottom of the table.
    tbl_lst_none = list()
    tbl_lst = list()
    for tst_name, tst_data in tbl_dict.items():
        itm_lst = [tst_data[u"name"], ]
        for idx in range(nr_cols):
            item = tst_data.get(-idx - 1, None)
            if item is None:
                itm_lst.insert(1, None)
            else:
                itm_lst.insert(1, round(item / 1e6, 1))
        itm_lst.extend(
            [
                None if itm is None else round(itm, 1)
                for itm in cmp_dict[tst_name]
            ]
        )
        if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
            tbl_lst_none.append(itm_lst)
        else:
            tbl_lst.append(itm_lst)

    # Sort by name, then by the last comparison value (ascending).
    tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_lst.extend(tbl_lst_none)

    # Generate csv table:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        for hdr in header:
            file_handler.write(u",".join(hdr) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join(
                [
                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
                    replace(u"null", u"-") for item in test
                ]
            ) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")

    # Reorganize header in txt table
    txt_table = list()
    with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
        for line in list(file_handler):
            txt_table.append(line)
    # Move the third line of the pretty table below the fifth one, so all
    # four header rows end up inside one frame.
    try:
        txt_table.insert(5, txt_table.pop(2))
        with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
            file_handler.writelines(txt_table)
    except IndexError:
        pass

    # Generate html table:
    # The four header rows are collapsed into one html header line per
    # column, separated by <br>.
    hdr_html = [
        u"<br>".join(row) for row in zip(*header)
    ]
    _tpc_generate_html_table(
        hdr_html,
        tbl_lst,
        table[u'output-file'],
        sort_data=True,
        title=table.get(u"title", u""),
        generate_rst=False
    )