Report: Add normalized comp tables - static content
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2022 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import math
21 import re
22
23 from collections import OrderedDict
24 from xml.etree import ElementTree as ET
25 from datetime import datetime as dt
26 from datetime import timedelta
27 from copy import deepcopy
28
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
31 import pandas as pd
32 import prettytable
33
34 from numpy import nan, isnan
35 from yaml import load, FullLoader, YAMLError
36
37 from pal_utils import mean, stdev, classify_anomalies, \
38     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
39
40
# Matches the NIC token inside a test name (e.g. "10ge2p1x710":
# digits + "ge" + digit + "p" + digit + non-digits + digits + letters).
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# Matches a leading topology/architecture prefix such as "2n-" plus
# three characters (e.g. "2n-skx") — TODO confirm against testbed names.
REGEX_TOPO_ARCH = re.compile(r'^(\dn-.{3})')

# Reference CPU frequency used to compute normalization factors
# (see generate_tables: norm_factor = NORM_FREQ / testbed frequency).
NORM_FREQ = 2.0  # [GHz]
46
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table specification is dispatched to its generator function
    according to the value of its "algorithm" field.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: "algorithm" name -> generator function.
    generator = {
        "table_merged_details": table_merged_details,
        "table_soak_vs_ndr": table_soak_vs_ndr,
        "table_perf_trending_dash": table_perf_trending_dash,
        "table_perf_trending_dash_html": table_perf_trending_dash_html,
        "table_last_failed_tests": table_last_failed_tests,
        "table_failed_tests": table_failed_tests,
        "table_failed_tests_html": table_failed_tests_html,
        "table_oper_data_html": table_oper_data_html,
        "table_comparison": table_comparison,
        "table_weekly_comparison": table_weekly_comparison,
        "table_job_spec_duration": table_job_spec_duration
    }

    logging.info(u"Generating the tables ...")

    # Per-testbed normalization factors: scale measured results to the
    # reference frequency NORM_FREQ.
    norm_factor = dict()
    for key, val in spec.environment.get("frequency", dict()).items():
        norm_factor[key] = NORM_FREQ / val

    for table in spec.tables:
        try:
            if table["algorithm"] == "table_weekly_comparison":
                table["testbeds"] = spec.environment.get("testbeds", None)
            if table["algorithm"] == "table_comparison":
                table["norm_factor"] = norm_factor
            # A missing/unknown algorithm raises KeyError (dict lookup);
            # a generator referenced above but not defined raises NameError.
            # Catch both so one bad table specification does not abort the
            # generation of all remaining tables.
            generator[table["algorithm"]](table, data)
        except (KeyError, NameError) as err:
            logging.error(
                f"Probably algorithm {table.get('algorithm')} is not "
                f"defined: {repr(err)}"
            )
    logging.info("Done.")
89
90
def table_job_spec_duration(table, input_data):
    """Generate the table(s) with algorithm: table_job_spec_duration
    specified in the specification file.

    Writes a pretty-printed text table of job-spec durations (HH:MM) to
    "<output-file>.txt".

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    jb_type = table.get(u"jb-type", None)

    tbl_lst = list()
    if jb_type == u"iterative":
        # Iterative jobs: one item per job-spec, aggregated over all builds.
        for line in table.get(u"lines", tuple()):
            tbl_itm = {
                u"name": line.get(u"job-spec", u""),
                u"data": list()
            }
            for job, builds in line.get(u"data-set", dict()).items():
                for build_nr in builds:
                    try:
                        # "elapsedtime" is in milliseconds; convert to
                        # whole minutes.
                        minutes = input_data.metadata(
                            job, str(build_nr)
                        )[u"elapsedtime"] // 60000
                    except (KeyError, IndexError, ValueError, AttributeError):
                        continue
                    tbl_itm[u"data"].append(minutes)
            tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
            tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
            tbl_lst.append(tbl_itm)
    elif jb_type == u"coverage":
        job = table.get(u"data", None)
        if not job:
            return
        # Coverage jobs: a single build per line, so stdev is undefined (NaN).
        for line in table.get(u"lines", tuple()):
            try:
                tbl_itm = {
                    u"name": line.get(u"job-spec", u""),
                    u"mean": input_data.metadata(
                        list(job.keys())[0], str(line[u"build"])
                    )[u"elapsedtime"] // 60000,
                    u"stdev": float(u"nan")
                }
                tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
            except (KeyError, IndexError, ValueError, AttributeError):
                continue
            tbl_lst.append(tbl_itm)
    else:
        logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
        return

    # Nothing collected: skip formatting and do not create an empty file.
    if not tbl_lst:
        return

    # Convert durations in minutes to "HH:MM"; NaN stdev (single-run
    # coverage jobs) is rendered as an empty string.
    for line in tbl_lst:
        line[u"mean"] = \
            f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
        if math.isnan(line[u"stdev"]):
            line[u"stdev"] = u""
        else:
            line[u"stdev"] = \
                f"{int(line[u'stdev'] // 60):02d}:" \
                f"{int(line[u'stdev'] % 60):02d}"

    rows = list()
    for itm in tbl_lst:
        rows.append([
            itm[u"name"],
            f"{len(itm[u'data'])}",
            f"{itm[u'mean']} +- {itm[u'stdev']}"
            if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
        ])

    txt_table = prettytable.PrettyTable(
        [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
    )
    for row in rows:
        txt_table.add_row(row)
    txt_table.align = u"r"
    txt_table.align[u"Job Specification"] = u"l"

    file_name = f"{table.get(u'output-file', u'')}.txt"
    with open(file_name, u"wt", encoding='utf-8') as txt_file:
        txt_file.write(str(txt_table))
180
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    For every suite, builds one HTML table per matching test from its
    "telemetry-show-run" data and writes them into an rst file
    "<output-file><suite-name>.rst" as a raw-html block.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"telemetry-show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    # Suites are needed to group the generated tables into output files.
    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")  # alternating row colors
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Test name as the table heading.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # Missing telemetry, or telemetry stored as a plain string (not a
        # parsed dict), yields a "No Data" stub table.
        if tst_data.get(u"telemetry-show-run", None) is None or \
                isinstance(tst_data[u"telemetry-show-run"], str):
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            # Trailing spacer row; the white "." presumably keeps the row
            # from collapsing in the rendered page.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT in the telemetry data.
        for dut_data in tst_data[u"telemetry-show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"runtime", None) is None:
                tcol.text = u"No Data"
                continue

            # Re-shape the flat telemetry samples into
            # runtime[thread_id][graph_node][metric_name] = value.
            runtime = dict()
            for item in dut_data[u"runtime"].get(u"data", tuple()):
                tid = int(item[u"labels"][u"thread_id"])
                if runtime.get(tid, None) is None:
                    runtime[tid] = dict()
                gnode = item[u"labels"][u"graph_node"]
                if runtime[tid].get(gnode, None) is None:
                    runtime[tid][gnode] = dict()
                try:
                    runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
                except ValueError:
                    # Non-numeric values are kept as-is.
                    runtime[tid][gnode][item[u"name"]] = item[u"value"]

            # NOTE(review): assumes thread_id values are contiguous from 0;
            # a gap would raise KeyError below — TODO confirm.
            threads = dict({idx: list() for idx in range(len(runtime))})
            for idx, run_data in runtime.items():
                for gnode, gdata in run_data.items():
                    threads[idx].append([
                        gnode,
                        int(gdata[u"calls"]),
                        int(gdata[u"vectors"]),
                        int(gdata[u"suspends"]),
                        float(gdata[u"clocks"]),
                        # Average vector size; guard against division by zero.
                        float(gdata[u"vectors"] / gdata[u"calls"]) \
                            if gdata[u"calls"] else 0.0
                    ])

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            # One sub-table per thread: title row, header row, data rows.
            for thread_nr, thread in threads.items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                # Thread 0 is the VPP main thread, others are workers.
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Trailing spacer row (see note on the "No Data" branch above).
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Concatenate the tables of all tests belonging to a suite and write
    # them into one rst file per suite.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
410
411
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Writes one csv file per suite, "<output-file>[_]<suite-name>.csv",
    containing the configured columns for all PASSed tests of the suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; double quotes inside titles are
    # escaped as "" per csv quoting rules.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            # Only PASSed tests belonging to this suite are included.
            if data[test][u"status"] != u"PASS" or \
                    data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # The column spec "data" is a two-word string; the
                    # second word selects the key in the test data.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Break long test names roughly in half on "-" and
                        # join with an rst line break (|br|).
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                # Drop the first line of the message.
                                col_data = col_data.split(u"\n", 1)[1]
                            except IndexError:
                                pass
                        col_data = col_data.replace(u'\n', u' |br| ').\
                            replace(u'\r', u'').replace(u'"', u"'")
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
                        col_data = col_data.replace(u'\n', u' |br| ')
                        # The last 5 characters (a trailing " |br| " remnant,
                        # presumably) are stripped — TODO confirm.
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # Keep only complete rows; a row can be cut short by the
            # "Test Failed" continue above.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
508
509
510 def _tpc_modify_test_name(test_name, ignore_nic=False):
511     """Modify a test name by replacing its parts.
512
513     :param test_name: Test name to be modified.
514     :param ignore_nic: If True, NIC is removed from TC name.
515     :type test_name: str
516     :type ignore_nic: bool
517     :returns: Modified test name.
518     :rtype: str
519     """
520     test_name_mod = test_name.\
521         replace(u"-ndrpdr", u"").\
522         replace(u"1t1c", u"1c").\
523         replace(u"2t1c", u"1c"). \
524         replace(u"2t2c", u"2c").\
525         replace(u"4t2c", u"2c"). \
526         replace(u"4t4c", u"4c").\
527         replace(u"8t4c", u"4c")
528
529     if ignore_nic:
530         return re.sub(REGEX_NIC, u"", test_name_mod)
531     return test_name_mod
532
533
534 def _tpc_modify_displayed_test_name(test_name):
535     """Modify a test name which is displayed in a table by replacing its parts.
536
537     :param test_name: Test name to be modified.
538     :type test_name: str
539     :returns: Modified test name.
540     :rtype: str
541     """
542     return test_name.\
543         replace(u"1t1c", u"1c").\
544         replace(u"2t1c", u"1c"). \
545         replace(u"2t2c", u"2c").\
546         replace(u"4t2c", u"2c"). \
547         replace(u"4t4c", u"4c").\
548         replace(u"8t4c", u"4c")
549
550
551 def _tpc_insert_data(target, src, include_tests):
552     """Insert src data to the target structure.
553
554     :param target: Target structure where the data is placed.
555     :param src: Source data to be placed into the target structure.
556     :param include_tests: Which results will be included (MRR, NDR, PDR).
557     :type target: list
558     :type src: dict
559     :type include_tests: str
560     """
561     try:
562         if include_tests == u"MRR":
563             target[u"mean"] = src[u"result"][u"receive-rate"]
564             target[u"stdev"] = src[u"result"][u"receive-stdev"]
565         elif include_tests == u"PDR":
566             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
567         elif include_tests == u"NDR":
568             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
569         elif u"latency" in include_tests:
570             keys = include_tests.split(u"-")
571             if len(keys) == 4:
572                 lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
573                 target[u"data"].append(
574                     float(u"nan") if lat == -1 else lat * 1e6
575                 )
576         elif include_tests == u"hoststack":
577             try:
578                 target[u"data"].append(
579                     float(src[u"result"][u"bits_per_second"])
580                 )
581             except KeyError:
582                 target[u"data"].append(
583                     (float(src[u"result"][u"client"][u"tx_data"]) * 8) /
584                     ((float(src[u"result"][u"client"][u"time"]) +
585                       float(src[u"result"][u"server"][u"time"])) / 2)
586                 )
587         elif include_tests == u"vsap":
588             try:
589                 target[u"data"].append(src[u"result"][u"cps"])
590             except KeyError:
591                 target[u"data"].append(src[u"result"][u"rps"])
592     except (KeyError, TypeError):
593         pass
594
595
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    # The position of the "Test Case" column selects the layout variant
    # (alignments and widths) from the params tuples below; it is also the
    # secondary sort key.
    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        # One pre-sorted DataFrame per (column, direction) pair; the
        # dropdown below switches the visible trace. "Test Case" is always
        # the tie-breaker, and its own direction is inverted by design.
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    # Alternating row colors.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        # One go.Table trace per pre-sorted variant.
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        # Dropdown menu toggling the visibility of exactly one trace.
        # NOTE(review): this loop rebinds "idx" (previously the "Test Case"
        # column index); it is not used for params afterwards, so harmless.
        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    # Last menu item (last column, descending) is the default.
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        # Single, unsorted table.
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    # Wrap the generated html in an rst file embedding it via an iframe.
    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )

        # Legend and footnote are rendered as a paragraph followed by a
        # bullet list (first line = paragraph, remaining lines = bullets).
        if legend:
            try:
                itm_lst = legend[1:-2].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
        if footnote:
            try:
                itm_lst = footnote[1:].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")
782
783
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    The table compares SOAK critical rates against the corresponding
    NDR/PDR/MRR results and reports the relative difference with its
    standard deviation.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test Case",
            f"Avg({table[u'reference'][u'title']})",
            f"Stdev({table[u'reference'][u'title']})",
            f"Avg({table[u'compare'][u'title']})",
            # Fixed: the opening parenthesis was missing here, producing
            # a malformed column caption ("Stdev<title>)").
            f"Stdev({table[u'compare'][u'title']})",
            u"Diff",
            u"Stdev(Diff)"
        ]
        header_str = u";".join(header) + u"\n"
        legend = (
            u"\nLegend:\n"
            f"Avg({table[u'reference'][u'title']}): "
            f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
            f"from a series of runs of the listed tests.\n"
            f"Stdev({table[u'reference'][u'title']}): "
            f"Standard deviation value of {table[u'reference'][u'title']} "
            f"[Mpps] computed from a series of runs of the listed tests.\n"
            f"Avg({table[u'compare'][u'title']}): "
            f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
            f"a series of runs of the listed tests.\n"
            f"Stdev({table[u'compare'][u'title']}): "
            f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
            f"computed from a series of runs of the listed tests.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Stdev(Diff): "
            u"Standard deviation of percentage change calculated for mean "
            u"values."
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        # No throughput data for this run; skip it silently.
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        # MRR results carry (mean, stdev) as a pair.
                        result = (tst_data[u"result"][u"receive-rate"],
                                  tst_data[u"result"][u"receive-stdev"])
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                # MRR: first sample already holds (mean, stdev).
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # Only rows with both reference and compare data are listed.
        if data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            try:
                item.append(round(delta))
            except ValueError:
                # round() raises on NaN; keep the raw value in that case.
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    with open(csv_file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(
        csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
    )
    with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
        file_handler.write(legend)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_lst,
        table[u'output-file'],
        legend=legend,
        title=table.get(u"title", u"")
    )
962
963
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    Classifies the per-test trend data into regressions / progressions
    and writes the dashboard as CSV and pretty-printed text.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Runs [#]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # A trend needs at least two samples.
        if len(data_t) < 2:
            continue

        try:
            classification_lst, avgs, _ = classify_anomalies(data_t)
        except ValueError as err:
            # NOTE(review): this returns from the whole function on the
            # first failing test although the log message says "Skipping";
            # kept as-is to preserve behavior - confirm intent.
            logging.info(f"{err} Skipping")
            return

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # Empty sequence between the long and short windows.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Count how many trailing samples share the newest trend value.
        # Fixed: stray trailing semicolon removed.
        nr_of_last_avgs = 0
        for x in reversed(avgs):
            if x == last_avg:
                nr_of_last_avgs += 1
            else:
                break

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            # Skip tests with incomplete trend data. (The former separate
            # "both relative changes are NaN" check was fully subsumed by
            # this condition and has been removed.)
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 nr_of_last_avgs,
                 rel_change_long,
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

    # Stable sorts: order by name, runs and long-term change first, then
    # group by progressions and finally by regressions (descending).
    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[2])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[5], reverse=True)
    tbl_lst.sort(key=lambda rel: rel[4], reverse=True)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1099
1100
1101 def _generate_url(testbed, test_name):
1102     """Generate URL to a trending plot from the name of the test case.
1103
1104     :param testbed: The testbed used for testing.
1105     :param test_name: The name of the test case.
1106     :type testbed: str
1107     :type test_name: str
1108     :returns: The URL to the plot with the trending data for the given test
1109         case.
1110     :rtype str
1111     """
1112
1113     if u"x520" in test_name:
1114         nic = u"x520"
1115     elif u"x710" in test_name:
1116         nic = u"x710"
1117     elif u"xl710" in test_name:
1118         nic = u"xl710"
1119     elif u"xxv710" in test_name:
1120         nic = u"xxv710"
1121     elif u"vic1227" in test_name:
1122         nic = u"vic1227"
1123     elif u"vic1385" in test_name:
1124         nic = u"vic1385"
1125     elif u"x553" in test_name:
1126         nic = u"x553"
1127     elif u"cx556" in test_name or u"cx556a" in test_name:
1128         nic = u"cx556a"
1129     elif u"ena" in test_name:
1130         nic = u"nitro50g"
1131     else:
1132         nic = u""
1133
1134     if u"64b" in test_name:
1135         frame_size = u"64b"
1136     elif u"78b" in test_name:
1137         frame_size = u"78b"
1138     elif u"imix" in test_name:
1139         frame_size = u"imix"
1140     elif u"9000b" in test_name:
1141         frame_size = u"9000b"
1142     elif u"1518b" in test_name:
1143         frame_size = u"1518b"
1144     elif u"114b" in test_name:
1145         frame_size = u"114b"
1146     else:
1147         frame_size = u""
1148
1149     if u"1t1c" in test_name or \
1150         (u"-1c-" in test_name and
1151          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1152         cores = u"1t1c"
1153     elif u"2t2c" in test_name or \
1154          (u"-2c-" in test_name and
1155           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1156         cores = u"2t2c"
1157     elif u"4t4c" in test_name or \
1158          (u"-4c-" in test_name and
1159           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1160         cores = u"4t4c"
1161     elif u"2t1c" in test_name or \
1162          (u"-1c-" in test_name and
1163           testbed in
1164           (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1165            u"2n-aws", u"3n-aws")):
1166         cores = u"2t1c"
1167     elif u"4t2c" in test_name or \
1168          (u"-2c-" in test_name and
1169           testbed in
1170           (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1171            u"2n-aws", u"3n-aws")):
1172         cores = u"4t2c"
1173     elif u"8t4c" in test_name or \
1174          (u"-4c-" in test_name and
1175           testbed in
1176           (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1177            u"2n-aws", u"3n-aws")):
1178         cores = u"8t4c"
1179     else:
1180         cores = u""
1181
1182     if u"testpmd" in test_name:
1183         driver = u"testpmd"
1184     elif u"l3fwd" in test_name:
1185         driver = u"l3fwd"
1186     elif u"avf" in test_name:
1187         driver = u"avf"
1188     elif u"af-xdp" in test_name or u"af_xdp" in test_name:
1189         driver = u"af_xdp"
1190     elif u"rdma" in test_name:
1191         driver = u"rdma"
1192     elif u"dnv" in testbed or u"tsh" in testbed:
1193         driver = u"ixgbe"
1194     elif u"ena" in test_name:
1195         driver = u"ena"
1196     else:
1197         driver = u"dpdk"
1198
1199     if u"macip-iacl1s" in test_name:
1200         bsf = u"features-macip-iacl1"
1201     elif u"macip-iacl10s" in test_name:
1202         bsf = u"features-macip-iacl10"
1203     elif u"macip-iacl50s" in test_name:
1204         bsf = u"features-macip-iacl50"
1205     elif u"iacl1s" in test_name:
1206         bsf = u"features-iacl1"
1207     elif u"iacl10s" in test_name:
1208         bsf = u"features-iacl10"
1209     elif u"iacl50s" in test_name:
1210         bsf = u"features-iacl50"
1211     elif u"oacl1s" in test_name:
1212         bsf = u"features-oacl1"
1213     elif u"oacl10s" in test_name:
1214         bsf = u"features-oacl10"
1215     elif u"oacl50s" in test_name:
1216         bsf = u"features-oacl50"
1217     elif u"nat44det" in test_name:
1218         bsf = u"nat44det-bidir"
1219     elif u"nat44ed" in test_name and u"udir" in test_name:
1220         bsf = u"nat44ed-udir"
1221     elif u"-cps" in test_name and u"ethip4udp" in test_name:
1222         bsf = u"udp-cps"
1223     elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1224         bsf = u"tcp-cps"
1225     elif u"-pps" in test_name and u"ethip4udp" in test_name:
1226         bsf = u"udp-pps"
1227     elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1228         bsf = u"tcp-pps"
1229     elif u"-tput" in test_name and u"ethip4udp" in test_name:
1230         bsf = u"udp-tput"
1231     elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1232         bsf = u"tcp-tput"
1233     elif u"udpsrcscale" in test_name:
1234         bsf = u"features-udp"
1235     elif u"iacl" in test_name:
1236         bsf = u"features"
1237     elif u"policer" in test_name:
1238         bsf = u"features"
1239     elif u"adl" in test_name:
1240         bsf = u"features"
1241     elif u"cop" in test_name:
1242         bsf = u"features"
1243     elif u"nat" in test_name:
1244         bsf = u"features"
1245     elif u"macip" in test_name:
1246         bsf = u"features"
1247     elif u"scale" in test_name:
1248         bsf = u"scale"
1249     elif u"base" in test_name:
1250         bsf = u"base"
1251     else:
1252         bsf = u"base"
1253
1254     if u"114b" in test_name and u"vhost" in test_name:
1255         domain = u"vts"
1256     elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1257         domain = u"nat44"
1258         if u"nat44det" in test_name:
1259             domain += u"-det-bidir"
1260         else:
1261             domain += u"-ed"
1262         if u"udir" in test_name:
1263             domain += u"-unidir"
1264         elif u"-ethip4udp-" in test_name:
1265             domain += u"-udp"
1266         elif u"-ethip4tcp-" in test_name:
1267             domain += u"-tcp"
1268         if u"-cps" in test_name:
1269             domain += u"-cps"
1270         elif u"-pps" in test_name:
1271             domain += u"-pps"
1272         elif u"-tput" in test_name:
1273             domain += u"-tput"
1274     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1275         domain = u"dpdk"
1276     elif u"memif" in test_name:
1277         domain = u"container_memif"
1278     elif u"srv6" in test_name:
1279         domain = u"srv6"
1280     elif u"vhost" in test_name:
1281         domain = u"vhost"
1282         if u"vppl2xc" in test_name:
1283             driver += u"-vpp"
1284         else:
1285             driver += u"-testpmd"
1286         if u"lbvpplacp" in test_name:
1287             bsf += u"-link-bonding"
1288     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1289         domain = u"nf_service_density_vnfc"
1290     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1291         domain = u"nf_service_density_cnfc"
1292     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1293         domain = u"nf_service_density_cnfp"
1294     elif u"ipsec" in test_name:
1295         domain = u"ipsec"
1296         if u"sw" in test_name:
1297             bsf += u"-sw"
1298         elif u"hw" in test_name:
1299             bsf += u"-hw"
1300         elif u"spe" in test_name:
1301             bsf += u"-spe"
1302     elif u"ethip4vxlan" in test_name:
1303         domain = u"ip4_tunnels"
1304     elif u"ethip4udpgeneve" in test_name:
1305         domain = u"ip4_tunnels"
1306     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1307         domain = u"ip4"
1308     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1309         domain = u"ip6"
1310     elif u"l2xcbase" in test_name or \
1311             u"l2xcscale" in test_name or \
1312             u"l2bdbasemaclrn" in test_name or \
1313             u"l2bdscale" in test_name or \
1314             u"l2patch" in test_name:
1315         domain = u"l2"
1316     else:
1317         domain = u""
1318
1319     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1320     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1321
1322     return file_name + anchor_name
1323
1324
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the dashboard CSV produced earlier and renders it as an HTML
    table with color-coded rows and (optionally) links to trending plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data  # Not used, kept for the unified generator signature.

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # MRR trending lives in a different directory and uses plain anchors.
    if test_type == u"MRR":
        lnk_dir = u"../trending/"
        lnk_sufix = u""
    else:
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = f"-{test_type.lower()}"

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except FileNotFoundError as err:
        logging.warning(f"{err}")
        return
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header (first CSV row):
    head_row = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col, caption in enumerate(csv_lst[0]):
        head_cell = ET.SubElement(
            head_row, u"th",
            attrib=dict(align=u"left" if col == 0 else u"center")
        )
        head_cell.text = caption

    # Alternating background color pairs per row classification:
    colors = {
        u"regression": (u"#ffcccc", u"#ff9999"),
        u"progression": (u"#c6ecc6", u"#9fdf9f"),
        u"normal": (u"#e9f1fb", u"#d4e4f7")
    }

    for r_idx, row in enumerate(csv_lst[1:]):
        # Regressions take precedence when both counters are non-zero.
        if int(row[4]):
            classification = u"regression"
        elif int(row[5]):
            classification = u"progression"
        else:
            classification = u"normal"
        data_row = ET.SubElement(
            dashboard, u"tr",
            attrib=dict(bgcolor=colors[classification][r_idx % 2])
        )

        # Columns:
        for c_idx, item in enumerate(row):
            data_cell = ET.SubElement(
                data_row, u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            if c_idx == 0 and table.get(u"add-links", True):
                # First column carries a link to the trending graph.
                link = ET.SubElement(
                    data_cell, u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), item)}"
                        f"{lnk_sufix}"
                    )
                )
                link.text = item
            else:
                data_cell.text = item

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1444
1445
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For every listed build, one record with build id, version, pass/fail
    counters, duration and the names of failed tests is written.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                meta = input_data.metadata(job, build)
                version = meta.get(u"version", u"")
                duration = meta.get(u"elapsedtime", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            tbl_list.extend((build, version))
            failed_tests = list()
            passed = failed = 0
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                if not groups:
                    continue
                # Anonymize IPv4 addresses in the failure message and drop
                # the teardown part.
                msg = tst_data[u'msg'].replace(u"\n", u"")
                msg = re.sub(
                    r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
                    'xxx.xxx.xxx.xxx', msg
                )
                msg = msg.split(u'Also teardown failed')[0]
                failed_tests.append(
                    f"{groups.group(0)}-{tst_data[u'name']}###{msg}"
                )
            tbl_list.extend((passed, failed, duration))
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(f"{item}\n" for item in tbl_list)
1513
1514
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Collects per-test failure counts within a sliding time window and
    writes them as CSV and pretty-printed text.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    test_type = u"MRR"
    if u"NDRPDR" in table.get(u"filter", list()):
        test_type = u"NDRPDR"

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the configured window (days) count.
    # Both timestamps are naive UTC, so the comparison is consistent.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        # Remember the count of failures and the most recent one.
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            tbl_lst.append([
                tst_data[u"name"],
                fails_nr,
                fails_last_date,
                fails_last_vpp,
                f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
                f"-build-{fails_last_csit}"
            ])

    # Order by last-failure date (newest first), then by failure count
    # (highest first). Python's sort is stable, so this single sort pass
    # replaces the former O(max_fails * n) bucket loop while producing
    # an identical order.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_lst.sort(key=lambda rel: rel[1], reverse=True)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1625
1626
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads a previously generated CSV file with failed tests and renders it
    as an HTML table (written as a ``.. raw:: html`` rST block). The first
    column optionally becomes a link to the corresponding trending page.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # NDR/PDR based tables link to the ndrpdr trending pages (PDR anchor),
    # MRR tables link to the regular trending pages.
    if test_type in (u"NDRPDR", u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = u"-pdr"
    else:
        lnk_dir = u"../trending/"
        lnk_sufix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        # Explicit encoding for consistency with the other file operations
        # in this module.
        with open(table[u"input-file"], u'rt', encoding='utf-8') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Guard against an empty input file; csv_lst[0] below would raise
    # IndexError otherwise.
    if not csv_lst:
        logging.warning(
            f"The input file {table[u'input-file']} is empty. Skipping."
        )
        return

    # Table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows:
    colors = (u"#e9f1fb", u"#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        # Alternate the row background color for readability.
        background = colors[r_idx % 2]
        trow = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=background)
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name: the first column links to the trending page of the test.
            if c_idx == 0 and table.get(u"add-links", True):
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), item)}"
                        f"{lnk_sufix}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w', encoding='utf-8') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            # ET.tostring(..., encoding=u"unicode") already returns str,
            # no extra str() conversion needed.
            html_file.write(ET.tostring(failed_tests, encoding=u"unicode"))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1724
1725
def table_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_comparison
    specified in the specification file.

    Builds a comparison table of mean values (with standard deviations) of
    the same tests across several data sets ("columns"). Optionally the
    values are normalized per topology/architecture (``table["norm_factor"]``
    keyed by the prefix matched by REGEX_TOPO_ARCH), pairs of columns are
    compared (relative change), and Root Cause Analysis (RCA) notes loaded
    from YAML files are attached. Writes ``<output-file>-csv.csv``,
    ``<output-file>.csv``, ``<output-file>.txt`` and an html table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f"  Generating the table {table.get('title', '')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get('type', '')} "
        f"{table.get('title', '')}."
    )

    # Whether to scale means/stdevs by per-topology normalization factors.
    normalize = table.get('normalize', False)

    columns = table.get("columns", None)
    if not columns:
        logging.error(
            f"No columns specified for {table.get('title', '')}. Skipping."
        )
        return

    # Collect per-column data: one dict per column with "title" and a map
    # of modified test name -> {name, replace, data, mean, stdev}.
    cols = list()
    for idx, col in enumerate(columns):
        if col.get("data-set", None) is None:
            logging.warning(f"No data for column {col.get('title', '')}")
            continue
        tag = col.get("tag", None)
        data = input_data.filter_data(
            table,
            params=[
                "throughput",
                "result",
                "latency",
                "name",
                "parent",
                "tags"
            ],
            data=col["data-set"],
            continue_on_error=True
        )
        col_data = {
            "title": col.get("title", f"Column{idx}"),
            "data": dict()
        }
        for builds in data.values:
            for build in builds:
                for tst_name, tst_data in build.items():
                    # Optional tag filter: skip tests not carrying the tag.
                    if tag and tag not in tst_data["tags"]:
                        continue
                    tst_name_mod = \
                        _tpc_modify_test_name(tst_name, ignore_nic=True).\
                        replace("2n1l-", "")
                    if col_data["data"].get(tst_name_mod, None) is None:
                        name = tst_data['name'].rsplit('-', 1)[0]
                        if "across testbeds" in table["title"].lower() or \
                                "across topologies" in table["title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        # "replace": True marks data eligible to be replaced
                        # by the optional "data-replacement" set below.
                        col_data["data"][tst_name_mod] = {
                            "name": name,
                            "replace": True,
                            "data": list(),
                            "mean": None,
                            "stdev": None
                        }
                    _tpc_insert_data(
                        target=col_data["data"][tst_name_mod],
                        src=tst_data,
                        include_tests=table["include-tests"]
                    )

        # Optional replacement data set: overrides previously collected
        # samples for tests present in it (first hit clears old samples).
        replacement = col.get("data-replacement", None)
        if replacement:
            rpl_data = input_data.filter_data(
                table,
                params=[
                    "throughput",
                    "result",
                    "latency",
                    "name",
                    "parent",
                    "tags"
                ],
                data=replacement,
                continue_on_error=True
            )
            for builds in rpl_data.values:
                for build in builds:
                    for tst_name, tst_data in build.items():
                        if tag and tag not in tst_data["tags"]:
                            continue
                        tst_name_mod = \
                            _tpc_modify_test_name(tst_name, ignore_nic=True).\
                            replace("2n1l-", "")
                        if col_data["data"].get(tst_name_mod, None) is None:
                            name = tst_data['name'].rsplit('-', 1)[0]
                            if "across testbeds" in table["title"].lower() \
                                    or "across topologies" in \
                                    table["title"].lower():
                                name = _tpc_modify_displayed_test_name(name)
                            col_data["data"][tst_name_mod] = {
                                "name": name,
                                "replace": False,
                                "data": list(),
                                "mean": None,
                                "stdev": None
                            }
                        # First replacement hit: drop the original samples.
                        if col_data["data"][tst_name_mod]["replace"]:
                            col_data["data"][tst_name_mod]["replace"] = False
                            col_data["data"][tst_name_mod]["data"] = list()
                        _tpc_insert_data(
                            target=col_data["data"][tst_name_mod],
                            src=tst_data,
                            include_tests=table["include-tests"]
                        )

        # Compute per-test mean/stdev for test types whose samples are
        # aggregated here (MRR results come pre-aggregated elsewhere).
        if table["include-tests"] in ("NDR", "PDR", "hoststack", "vsap") \
                or "latency" in table["include-tests"]:
            for tst_name, tst_data in col_data["data"].items():
                if tst_data["data"]:
                    tst_data["mean"] = mean(tst_data["data"])
                    tst_data["stdev"] = stdev(tst_data["data"])

        cols.append(col_data)

    # Pivot: test name -> {"name": ..., <col title>: {"mean", "stdev"}, ...}
    tbl_dict = dict()
    for col in cols:
        for tst_name, tst_data in col["data"].items():
            if tbl_dict.get(tst_name, None) is None:
                tbl_dict[tst_name] = {
                    "name": tst_data["name"]
                }
            tbl_dict[tst_name][col["title"]] = {
                "mean": tst_data["mean"],
                "stdev": tst_data["stdev"]
            }

    if not tbl_dict:
        logging.warning(f"No data for table {table.get('title', '')}!")
        return

    # Build rows: [name, col1 data, col2 data, ...]; apply normalization
    # factor chosen by the topology/arch prefix of the column title.
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        row = [tst_data[u"name"], ]
        for col in cols:
            row_data = tst_data.get(col["title"], None)
            if normalize and row_data:
                groups = re.search(REGEX_TOPO_ARCH, col["title"])
                topo_arch = groups.group(0) if groups else ""
                norm_factor = table["norm_factor"].get(topo_arch, 1.0)
                row_data_norm = {
                    "mean": row_data["mean"] * norm_factor,
                    "stdev": row_data["stdev"] * norm_factor
                }
            else:
                row_data_norm = row_data
            row.append(row_data_norm)
        tbl_lst.append(row)

    # Validate comparison specs and load the optional RCA YAML files.
    comparisons = table.get("comparisons", None)
    rcas = list()
    if comparisons and isinstance(comparisons, list):
        for idx, comp in enumerate(comparisons):
            try:
                col_ref = int(comp["reference"])
                col_cmp = int(comp["compare"])
            except KeyError:
                logging.warning("Comparison: No references defined! Skipping.")
                # NOTE(review): popping from `comparisons` while iterating it
                # with enumerate() skips the following element and can leave
                # `rcas` misaligned with `comparisons` — verify intent.
                comparisons.pop(idx)
                continue
            if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
                    col_ref == col_cmp):
                logging.warning(f"Wrong values of reference={col_ref} "
                                f"and/or compare={col_cmp}. Skipping.")
                # NOTE(review): same pop-while-iterating hazard as above.
                comparisons.pop(idx)
                continue
            rca_file_name = comp.get("rca-file", None)
            if rca_file_name:
                try:
                    with open(rca_file_name, "r") as file_handler:
                        rcas.append(
                            {
                                "title": f"RCA{idx + 1}",
                                "data": load(file_handler, Loader=FullLoader)
                            }
                        )
                except (YAMLError, IOError) as err:
                    logging.warning(
                        f"The RCA file {rca_file_name} does not exist or "
                        f"it is corrupted!"
                    )
                    logging.debug(repr(err))
                    rcas.append(None)
            else:
                rcas.append(None)
    else:
        comparisons = None

    # Compute the comparison (diff) columns. Rows with missing data in any
    # compared column are dropped (the for/else only appends when no break).
    tbl_cmp_lst = list()
    if comparisons:
        for row in tbl_lst:
            new_row = deepcopy(row)
            for comp in comparisons:
                ref_itm = row[int(comp["reference"])]
                # Fall back to the alternative reference column if the
                # primary reference has no data.
                if ref_itm is None and \
                        comp.get("reference-alt", None) is not None:
                    ref_itm = row[int(comp["reference-alt"])]
                cmp_itm = row[int(comp[u"compare"])]
                if ref_itm is not None and cmp_itm is not None and \
                        ref_itm["mean"] is not None and \
                        cmp_itm["mean"] is not None and \
                        ref_itm["stdev"] is not None and \
                        cmp_itm["stdev"] is not None:
                    try:
                        delta, d_stdev = relative_change_stdev(
                            ref_itm["mean"], cmp_itm["mean"],
                            ref_itm["stdev"], cmp_itm["stdev"]
                        )
                    except ZeroDivisionError:
                        break
                    if delta is None or math.isnan(delta):
                        break
                    # Scaled by 1e6 so the later "/ 1e6" round-trips; keeps
                    # diffs in the same unit space as the raw columns.
                    new_row.append({
                        "mean": delta * 1e6,
                        "stdev": d_stdev * 1e6
                    })
                else:
                    break
            else:
                tbl_cmp_lst.append(new_row)

    # Sort by name, then by the last diff column (descending).
    try:
        tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
        tbl_cmp_lst.sort(key=lambda rel: rel[-1]['mean'], reverse=True)
    except TypeError as err:
        logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")

    # Flatten rows for the verbose csv: Avg/Stdev value pairs, "NT" (Not
    # Tested) placeholders, and RCA tags per row.
    tbl_for_csv = list()
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or\
                    itm.get('mean', None) is None or \
                    itm.get('stdev', None) is None:
                row.append("NT")
                row.append("NT")
            else:
                row.append(round(float(itm['mean']) / 1e6, 3))
                row.append(round(float(itm['stdev']) / 1e6, 3))
        for rca in rcas:
            if rca is None:
                continue
            rca_nr = rca["data"].get(row[0], "-")
            row.append(f"[{rca_nr}]" if rca_nr != "-" else "-")
        tbl_for_csv.append(row)

    header_csv = ["Test Case", ]
    for col in cols:
        header_csv.append(f"Avg({col['title']})")
        header_csv.append(f"Stdev({col['title']})")
    # NOTE(review): `comparisons` may be None here (set above when the spec
    # has no/invalid comparisons) — this loop would raise TypeError then.
    for comp in comparisons:
        header_csv.append(
            f"Avg({comp.get('title', '')})"
        )
        header_csv.append(
            f"Stdev({comp.get('title', '')})"
        )
    for rca in rcas:
        if rca:
            header_csv.append(rca["title"])

    legend_lst = table.get("legend", None)
    if legend_lst is None:
        legend = ""
    else:
        legend = "\n" + "\n".join(legend_lst) + "\n"

    footnote = ""
    if rcas and any(rcas):
        footnote += "\nRoot Cause Analysis:\n"
        for rca in rcas:
            if rca:
                footnote += f"{rca['data'].get('footnote', '')}\n"

    # Verbose csv output (every value quoted, legend/footnote appended).
    csv_file_name = f"{table['output-file']}-csv.csv"
    with open(csv_file_name, "wt", encoding='utf-8') as file_handler:
        file_handler.write(
            ",".join([f'"{itm}"' for itm in header_csv]) + "\n"
        )
        for test in tbl_for_csv:
            file_handler.write(
                ",".join([f'"{item}"' for item in test]) + "\n"
            )
        if legend_lst:
            for item in legend_lst:
                file_handler.write(f'"{item}"\n')
        if footnote:
            for itm in footnote.split("\n"):
                file_handler.write(f'"{itm}"\n')

    # Format "mean ±stdev" strings and track per-column widths for the
    # fixed-width txt/html rendering.
    tbl_tmp = list()
    # NOTE(review): raises IndexError if tbl_cmp_lst is empty (e.g. all rows
    # dropped by the comparison loop above).
    max_lens = [0, ] * len(tbl_cmp_lst[0])
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or \
                    itm.get('mean', None) is None or \
                    itm.get('stdev', None) is None:
                new_itm = "NT"
            else:
                if idx < len(cols):
                    # Raw data column: unsigned mean.
                    new_itm = (
                        f"{round(float(itm['mean']) / 1e6, 2)} "
                        f"\u00B1{round(float(itm['stdev']) / 1e6, 2)}".
                        replace("nan", "NaN")
                    )
                else:
                    # Diff column: explicit sign on the mean.
                    new_itm = (
                        f"{round(float(itm['mean']) / 1e6, 2):+} "
                        f"\u00B1{round(float(itm['stdev']) / 1e6, 2)}".
                        replace("nan", "NaN")
                    )
            if len(new_itm.rsplit(" ", 1)[-1]) > max_lens[idx]:
                max_lens[idx] = len(new_itm.rsplit(" ", 1)[-1])
            row.append(new_itm)

        tbl_tmp.append(row)

    header = ["Test Case", ]
    header.extend([col["title"] for col in cols])
    header.extend([comp.get("title", "") for comp in comparisons])

    # Right-align the stdev part to max_lens and prepend RCA tags to diffs.
    tbl_final = list()
    for line in tbl_tmp:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm in ("NT", "NaN"):
                row.append(itm)
                continue
            itm_lst = itm.rsplit("\u00B1", 1)
            itm_lst[-1] = \
                f"{' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
            itm_str = "\u00B1".join(itm_lst)

            if idx >= len(cols):
                # Diffs
                rca = rcas[idx - len(cols)]
                if rca:
                    # Add rcas to diffs
                    rca_nr = rca["data"].get(row[0], None)
                    if rca_nr:
                        # Pad to at least the header width (min 19 chars).
                        hdr_len = len(header[idx + 1]) - 1
                        if hdr_len < 19:
                            hdr_len = 19
                        rca_nr = f"[{rca_nr}]"
                        itm_str = (
                            f"{' ' * (4 - len(rca_nr))}{rca_nr}"
                            f"{' ' * (hdr_len - 4 - len(itm_str))}"
                            f"{itm_str}"
                        )
            row.append(itm_str)
        tbl_final.append(row)

    # Generate csv tables:
    csv_file_name = f"{table['output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, "wt", encoding='utf-8') as file_handler:
        file_handler.write(";".join(header) + "\n")
        for test in tbl_final:
            file_handler.write(";".join([str(item) for item in test]) + "\n")

    # Generate txt table:
    txt_file_name = f"{table['output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=";")

    with open(txt_file_name, 'a', encoding='utf-8') as file_handler:
        file_handler.write(legend)
        file_handler.write(footnote)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_final,
        table['output-file'],
        legend=legend,
        footnote=footnote,
        sort_data=False,
        title=table.get("title", "")
    )
2120
2121
def table_weekly_comparison(table, in_data):
    """Generate the table(s) with algorithm: table_weekly_comparison
    specified in the specification file.

    Builds a table with one data column per build (up to
    ``nr-of-data-columns``), each holding the NDR/PDR lower throughput of
    every test, plus relative-change columns for the configured comparisons.
    Writes csv and txt files and an html table.

    :param table: Table to generate.
    :param in_data: Data to process.
    :type table: pandas.Series
    :type in_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    incl_tests = table.get(u"include-tests", None)
    if incl_tests not in (u"NDR", u"PDR"):
        logging.error(f"Wrong tests to include specified ({incl_tests}).")
        return

    nr_cols = table.get(u"nr-of-data-columns", None)
    if not nr_cols or nr_cols < 2:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    data = in_data.filter_data(
        table,
        params=[u"throughput", u"result", u"name", u"parent", u"tags"],
        continue_on_error=True
    )

    # Four header rows; per-build values are inserted at index 1 below, so
    # later-processed builds end up closer to the row label.
    header = [
        [u"VPP Version", ],
        [u"Start Timestamp", ],
        [u"CSIT Build", ],
        [u"CSIT Testbed", ]
    ]
    tbl_dict = dict()
    idx = 0
    # Optional mapping of testbed IP -> human-readable testbed name.
    tb_tbl = table.get(u"testbeds", None)
    for job_name, job_data in data.items():
        for build_nr, build in job_data.items():
            # Collect at most nr_cols data columns.
            if idx >= nr_cols:
                break
            if build.empty:
                continue

            tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            else:
                testbed = u""
            header[2].insert(1, build_nr)
            header[3].insert(1, testbed)
            header[1].insert(
                1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
            )
            header[0].insert(
                1, in_data.metadata(job_name, build_nr).get(u"version", u"")
            )

            for tst_name, tst_data in build.items():
                tst_name_mod = \
                    _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
                if not tbl_dict.get(tst_name_mod, None):
                    tbl_dict[tst_name_mod] = dict(
                        name=tst_data[u'name'].rsplit(u'-', 1)[0],
                    )
                try:
                    # Negative keys (-1, -2, ...) index the data columns in
                    # collection order; used again when building tbl_lst.
                    tbl_dict[tst_name_mod][-idx - 1] = \
                        tst_data[u"throughput"][incl_tests][u"LOWER"]
                except (TypeError, IndexError, KeyError, ValueError):
                    # Best-effort: a test without the expected throughput
                    # structure simply has no value in this column.
                    pass
            idx += 1

    if idx < nr_cols:
        logging.error(u"Not enough data to build the table! Skipping")
        return

    # Compute relative changes for each configured comparison and extend
    # the header rows with a "Diff(... vs ...)" column.
    cmp_dict = dict()
    for idx, cmp in enumerate(table.get(u"comparisons", list())):
        idx_ref = cmp.get(u"reference", None)
        idx_cmp = cmp.get(u"compare", None)
        if idx_ref is None or idx_cmp is None:
            continue
        header[0].append(
            f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
            f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
        )
        header[1].append(u"")
        header[2].append(u"")
        header[3].append(u"")
        for tst_name, tst_data in tbl_dict.items():
            if not cmp_dict.get(tst_name, None):
                cmp_dict[tst_name] = list()
            ref_data = tst_data.get(idx_ref, None)
            cmp_data = tst_data.get(idx_cmp, None)
            if ref_data is None or cmp_data is None:
                cmp_dict[tst_name].append(float(u'nan'))
            else:
                cmp_dict[tst_name].append(
                    relative_change(ref_data, cmp_data)
                )

    # Rows with no value in the last (comparison) column are collected
    # separately and appended at the end of the table.
    tbl_lst_none = list()
    tbl_lst = list()
    for tst_name, tst_data in tbl_dict.items():
        itm_lst = [tst_data[u"name"], ]
        for idx in range(nr_cols):
            item = tst_data.get(-idx - 1, None)
            if item is None:
                itm_lst.insert(1, None)
            else:
                # Convert to Mpps and round; insert(1, ...) mirrors the
                # header column ordering above.
                itm_lst.insert(1, round(item / 1e6, 1))
        itm_lst.extend(
            [
                None if itm is None else round(itm, 1)
                for itm in cmp_dict[tst_name]
            ]
        )
        if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
            tbl_lst_none.append(itm_lst)
        else:
            tbl_lst.append(itm_lst)

    # Sort by name, then by the last comparison column; no-data rows last.
    tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_lst.extend(tbl_lst_none)

    # Generate csv table:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        for hdr in header:
            file_handler.write(u",".join(hdr) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join(
                [
                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
                    replace(u"null", u"-") for item in test
                ]
            ) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")

    # Reorganize header in txt table
    txt_table = list()
    with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
        for line in list(file_handler):
            txt_table.append(line)
    try:
        # Move the "CSIT Build" header row below the separator line
        # produced by the pretty-txt conversion.
        txt_table.insert(5, txt_table.pop(2))
        with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
            file_handler.writelines(txt_table)
    except IndexError:
        pass

    # Generate html table:
    hdr_html = [
        u"<br>".join(row) for row in zip(*header)
    ]
    _tpc_generate_html_table(
        hdr_html,
        tbl_lst,
        table[u'output-file'],
        sort_data=True,
        title=table.get(u"title", u""),
        generate_rst=False
    )