Report: Comparison tables
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2022 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import math
21 import re
22
23 from collections import OrderedDict
24 from xml.etree import ElementTree as ET
25 from datetime import datetime as dt
26 from datetime import timedelta
27 from copy import deepcopy
28
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
31 import pandas as pd
32 import prettytable
33
34 from numpy import nan, isnan
35 from yaml import load, FullLoader, YAMLError
36
37 from pal_utils import mean, stdev, classify_anomalies, \
38     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
39
40
41 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
42
43 NORM_FREQ = 2.0  # [GHz]
44
45
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name from the specification -> generator
    # function. All generators share the signature (table, data).
    generator = {
        "table_merged_details": table_merged_details,
        "table_soak_vs_ndr": table_soak_vs_ndr,
        "table_perf_trending_dash": table_perf_trending_dash,
        "table_perf_trending_dash_html": table_perf_trending_dash_html,
        "table_last_failed_tests": table_last_failed_tests,
        "table_failed_tests": table_failed_tests,
        "table_failed_tests_html": table_failed_tests_html,
        "table_oper_data_html": table_oper_data_html,
        "table_comparison": table_comparison,
        "table_weekly_comparison": table_weekly_comparison,
        "table_job_spec_duration": table_job_spec_duration
    }

    logging.info(u"Generating the tables ...")

    # Per-testbed normalization factors scaling results to the reference
    # CPU frequency NORM_FREQ (e.g. a 2.5 GHz testbed gets 2.0 / 2.5 = 0.8).
    norm_factor = dict()
    for key, val in spec.environment.get("frequency", dict()).items():
        norm_factor[key] = NORM_FREQ / val

    for table in spec.tables:
        try:
            if table["algorithm"] == "table_weekly_comparison":
                table["testbeds"] = spec.environment.get("testbeds", None)
            if table["algorithm"] == "table_comparison":
                table["norm_factor"] = norm_factor
            generator[table["algorithm"]](table, data)
        except (NameError, KeyError) as err:
            # An unknown algorithm raises KeyError on the dict lookup, not
            # NameError; catch both so one bad table specification does not
            # abort the generation of the remaining tables.
            logging.error(
                f"Probably algorithm {table['algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info("Done.")
88
89
def table_job_spec_duration(table, input_data):
    """Generate the table(s) with algorithm: table_job_spec_duration
    specified in the specification file.

    Produces a text table listing, per job specification, the number of
    runs and their duration formatted as "HH:MM +- HH:MM" (mean +- stdev).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    jb_type = table.get(u"jb-type", None)

    tbl_lst = list()
    if jb_type == u"iterative":
        # Iterative jobs: aggregate elapsed times over all listed builds of
        # each job and compute mean / stdev per job specification.
        for line in table.get(u"lines", tuple()):
            tbl_itm = {
                u"name": line.get(u"job-spec", u""),
                u"data": list()
            }
            for job, builds in line.get(u"data-set", dict()).items():
                for build_nr in builds:
                    try:
                        # "elapsedtime" is in milliseconds; convert to minutes.
                        minutes = input_data.metadata(
                            job, str(build_nr)
                        )[u"elapsedtime"] // 60000
                    except (KeyError, IndexError, ValueError, AttributeError):
                        continue
                    tbl_itm[u"data"].append(minutes)
            tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
            tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
            tbl_lst.append(tbl_itm)
    elif jb_type == u"coverage":
        job = table.get(u"data", None)
        if not job:
            return
        # Coverage jobs: a single build per line, so stdev is undefined (NaN)
        # and is later rendered as an empty string.
        for line in table.get(u"lines", tuple()):
            try:
                tbl_itm = {
                    u"name": line.get(u"job-spec", u""),
                    u"mean": input_data.metadata(
                        list(job.keys())[0], str(line[u"build"])
                    )[u"elapsedtime"] // 60000,
                    u"stdev": float(u"nan")
                }
                tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
            except (KeyError, IndexError, ValueError, AttributeError):
                continue
            tbl_lst.append(tbl_itm)
    else:
        logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
        return

    # Convert minutes to "HH:MM"; an undefined stdev (NaN) becomes an empty
    # string so the "+-" part is omitted from the output row.
    for line in tbl_lst:
        line[u"mean"] = \
            f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
        if math.isnan(line[u"stdev"]):
            line[u"stdev"] = u""
        else:
            line[u"stdev"] = \
                f"{int(line[u'stdev'] //60):02d}:{int(line[u'stdev'] % 60):02d}"

    if not tbl_lst:
        return

    rows = list()
    for itm in tbl_lst:
        rows.append([
            itm[u"name"],
            f"{len(itm[u'data'])}",
            f"{itm[u'mean']} +- {itm[u'stdev']}"
            if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
        ])

    txt_table = prettytable.PrettyTable(
        [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
    )
    for row in rows:
        txt_table.add_row(row)
    txt_table.align = u"r"
    txt_table.align[u"Job Specification"] = u"l"

    file_name = f"{table.get(u'output-file', u'')}.txt"
    with open(file_name, u"wt", encoding='utf-8') as txt_file:
        txt_file.write(str(txt_table))
178
179
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    For every suite, an .rst file is written containing raw HTML tables
    with per-DUT, per-thread VPP runtime (show-run) telemetry.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"telemetry-show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optional sorting of tests by index, direction taken from the spec.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    # Suites drive the grouping: one output file is written per suite.
    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")  # alternating row stripes
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Table title row: the test name spanning all six columns.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row below the title.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # Telemetry missing, or present only as a plain string (not the
        # parsed per-DUT dict): emit a "No Data" placeholder table.
        if tst_data.get(u"telemetry-show-run", None) is None or \
                isinstance(tst_data[u"telemetry-show-run"], str):
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            # Trailing spacer: a white "." in a white cell forces vertical
            # space between consecutive tables in the rendered page.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column headers for the per-graph-node runtime statistics.
        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT in the telemetry dict.
        for dut_data in tst_data[u"telemetry-show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"runtime", None) is None:
                tcol.text = u"No Data"
                continue

            # Re-key the flat telemetry samples into
            # runtime[thread_id][graph_node][metric_name] = value,
            # converting numeric values to float where possible.
            runtime = dict()
            for item in dut_data[u"runtime"].get(u"data", tuple()):
                tid = int(item[u"labels"][u"thread_id"])
                if runtime.get(tid, None) is None:
                    runtime[tid] = dict()
                gnode = item[u"labels"][u"graph_node"]
                if runtime[tid].get(gnode, None) is None:
                    runtime[tid][gnode] = dict()
                try:
                    runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
                except ValueError:
                    runtime[tid][gnode][item[u"name"]] = item[u"value"]

            # NOTE(review): assumes thread ids form a contiguous 0-based
            # range (keys 0..len-1); a gap would raise KeyError below.
            threads = dict({idx: list() for idx in range(len(runtime))})
            for idx, run_data in runtime.items():
                for gnode, gdata in run_data.items():
                    threads[idx].append([
                        gnode,
                        int(gdata[u"calls"]),
                        int(gdata[u"vectors"]),
                        int(gdata[u"suspends"]),
                        float(gdata[u"clocks"]),
                        # Average vector size; guard against zero calls.
                        float(gdata[u"vectors"] / gdata[u"calls"]) \
                            if gdata[u"calls"] else 0.0
                    ])

            # DUT identification row (host IP and socket).
            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            # One sub-table per thread: "main" for thread 0, "worker_N" else.
            for thread_nr, thread in threads.items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                # Header row: first column left-aligned, rest right-aligned.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # Data rows with alternating background colors; floats are
                # rendered with two decimal places.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Trailing spacer row (white dot) separating consecutive tables.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Concatenate the HTML tables of all tests belonging to a suite and
    # write them into one raw-HTML .rst file per suite.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
409
410
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Writes one CSV file per suite with the columns configured in the
    specification; cell text is massaged with |br| / |prein| / |preout|
    markers for later rst rendering.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of tests by index, direction taken from the spec.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables.
    # Titles are CSV-quoted: embedded double quotes are doubled ("").
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            # Only passed tests belonging to this suite are included.
            if data[test][u"status"] != u"PASS" or \
                    data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column["data"] is e.g. "data name"; the second word is
                    # the key into the test data. Cell text is CSV-quoted.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Long test names are split roughly in half at a "-"
                        # boundary with a |br| line break for readability.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                # Drop the first line of the message.
                                col_data = col_data.split(u"\n", 1)[1]
                            except IndexError:
                                pass
                        col_data = col_data.replace(u'\n', u' |br| ').\
                            replace(u'\r', u'').replace(u'"', u"'")
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
                        # Trailing 5 characters (the final " |br| " remnant)
                        # are cut off before wrapping.
                        col_data = col_data.replace(u'\n', u' |br| ')
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # A row skipped mid-way (e.g. "Test Failed" filter) is shorter
            # than the header and is dropped entirely here.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
507
508
509 def _tpc_modify_test_name(test_name, ignore_nic=False):
510     """Modify a test name by replacing its parts.
511
512     :param test_name: Test name to be modified.
513     :param ignore_nic: If True, NIC is removed from TC name.
514     :type test_name: str
515     :type ignore_nic: bool
516     :returns: Modified test name.
517     :rtype: str
518     """
519     test_name_mod = test_name.\
520         replace(u"-ndrpdr", u"").\
521         replace(u"1t1c", u"1c").\
522         replace(u"2t1c", u"1c"). \
523         replace(u"2t2c", u"2c").\
524         replace(u"4t2c", u"2c"). \
525         replace(u"4t4c", u"4c").\
526         replace(u"8t4c", u"4c")
527
528     if ignore_nic:
529         return re.sub(REGEX_NIC, u"", test_name_mod)
530     return test_name_mod
531
532
533 def _tpc_modify_displayed_test_name(test_name):
534     """Modify a test name which is displayed in a table by replacing its parts.
535
536     :param test_name: Test name to be modified.
537     :type test_name: str
538     :returns: Modified test name.
539     :rtype: str
540     """
541     return test_name.\
542         replace(u"1t1c", u"1c").\
543         replace(u"2t1c", u"1c"). \
544         replace(u"2t2c", u"2c").\
545         replace(u"4t2c", u"2c"). \
546         replace(u"4t4c", u"4c").\
547         replace(u"8t4c", u"4c")
548
549
550 def _tpc_insert_data(target, src, include_tests):
551     """Insert src data to the target structure.
552
553     :param target: Target structure where the data is placed.
554     :param src: Source data to be placed into the target structure.
555     :param include_tests: Which results will be included (MRR, NDR, PDR).
556     :type target: list
557     :type src: dict
558     :type include_tests: str
559     """
560     try:
561         if include_tests == u"MRR":
562             target[u"mean"] = src[u"result"][u"receive-rate"]
563             target[u"stdev"] = src[u"result"][u"receive-stdev"]
564         elif include_tests == u"PDR":
565             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
566         elif include_tests == u"NDR":
567             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
568         elif u"latency" in include_tests:
569             keys = include_tests.split(u"-")
570             if len(keys) == 4:
571                 lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
572                 target[u"data"].append(
573                     float(u"nan") if lat == -1 else lat * 1e6
574                 )
575         elif include_tests == u"hoststack":
576             try:
577                 target[u"data"].append(
578                     float(src[u"result"][u"bits_per_second"])
579                 )
580             except KeyError:
581                 target[u"data"].append(
582                     (float(src[u"result"][u"client"][u"tx_data"]) * 8) /
583                     ((float(src[u"result"][u"client"][u"time"]) +
584                       float(src[u"result"][u"server"][u"time"])) / 2)
585                 )
586         elif include_tests == u"vsap":
587             try:
588                 target[u"data"].append(src[u"result"][u"cps"])
589             except KeyError:
590                 target[u"data"].append(src[u"result"][u"rps"])
591     except (KeyError, TypeError):
592         pass
593
594
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    # The position of the "Test Case" column selects the layout variant
    # (alignments, column widths) from params below; 0 if not present.
    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        # One pre-sorted copy of the data per header column (ascending),
        # then one per column descending; ties broken by the "Test Case"
        # column. Each copy becomes a separate (hidden) plotly trace.
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    # Alternating row-stripe colors for the table body.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        # Add one go.Table trace per sort order; a dropdown menu below
        # toggles which single trace is visible.
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        # Build the dropdown: ascending entries first, then descending,
        # mirroring the order in which the traces were added above.
        # (Note: this loop re-binds "idx"; the layout index is no longer
        # needed past this point.)
        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    # Default selection: the last menu item.
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        # No sorting: a single static table trace.
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    # Wrap the generated HTML in an .rst file that embeds it via an iframe;
    # the destination directory depends on whether this is a vpp or dpdk
    # comparison table.
    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )

        # Legend and footnote are rendered as a lead line followed by a
        # bullet list; the slicing trims surrounding newline characters
        # before splitting into items.
        if legend:
            try:
                itm_lst = legend[1:-2].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
        if footnote:
            try:
                itm_lst = footnote[1:].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")
781
782
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Compares SOAK (PLRsearch) results with the corresponding NDR/PDR/MRR
    results of the same test cases and writes the comparison as csv,
    pretty txt and html tables.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test Case",
            f"Avg({table[u'reference'][u'title']})",
            f"Stdev({table[u'reference'][u'title']})",
            f"Avg({table[u'compare'][u'title']})",
            # Bug fix: the opening parenthesis was missing here, producing
            # e.g. u"StdevVPP)" instead of u"Stdev(VPP)" in the header,
            # inconsistent with the other columns and the legend below.
            f"Stdev({table[u'compare'][u'title']})",
            u"Diff",
            u"Stdev(Diff)"
        ]
        header_str = u";".join(header) + u"\n"
        legend = (
            u"\nLegend:\n"
            f"Avg({table[u'reference'][u'title']}): "
            f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
            f"from a series of runs of the listed tests.\n"
            f"Stdev({table[u'reference'][u'title']}): "
            f"Standard deviation value of {table[u'reference'][u'title']} "
            f"[Mpps] computed from a series of runs of the listed tests.\n"
            f"Avg({table[u'compare'][u'title']}): "
            f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
            f"a series of runs of the listed tests.\n"
            f"Stdev({table[u'compare'][u'title']}): "
            f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
            f"computed from a series of runs of the listed tests.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Stdev(Diff): "
            u"Standard deviation of percentage change calculated for mean "
            u"values."
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        # Drop the last name component (the test type suffix).
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        # Test run without a usable throughput result.
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        # MRR keeps (mean, stdev) as a tuple.
                        result = (tst_data[u"result"][u"receive-rate"],
                                  tst_data[u"result"][u"receive-stdev"])
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            # Convert [pps] to [Mpps] for presentation.
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        if data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            # round() raises ValueError for NaN; keep the raw value then.
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
            tbl_lst.append(item)

    # Sort the table according to the relative change
    # NOTE(review): rel[-1] is the Stdev(Diff) column, not Diff (which is
    # rel[-2]); confirm whether sorting by the stdev column is intended.
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    with open(csv_file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(
        csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
    )
    with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
        file_handler.write(legend)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_lst,
        table[u'output-file'],
        legend=legend,
        title=table.get(u"title", u"")
    )
961
962
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    For each test it computes the short- and long-term trend from the
    anomaly classification and writes a csv dashboard plus its pretty txt
    rendering.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Runs [#]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to compute a trend.
        if len(data_t) < 2:
            continue

        try:
            classification_lst, avgs, _ = classify_anomalies(data_t)
        except ValueError as err:
            # NOTE(review): this aborts the whole table, not just this test;
            # confirm that u"continue" was not intended here.
            logging.info(f"{err} Skipping")
            return

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # No non-NaN samples in the long-term window.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Count how many of the newest runs share the latest trend value.
        # (Bug fix: removed a stray trailing semicolon.)
        nr_of_last_avgs = 0
        for x in reversed(avgs):
            if x == last_avg:
                nr_of_last_avgs += 1
            else:
                break

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            # A single check suffices here: the previously separate
            # "isnan(rel_change_last) and isnan(rel_change_long)" test was
            # dead code, fully subsumed by this OR-condition.
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 nr_of_last_avgs,
                 rel_change_long,
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

    # Stable multi-pass sort: name asc, runs asc, long-term change asc,
    # then progressions desc and (dominant) regressions desc.
    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[2])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[5], reverse=True)
    tbl_lst.sort(key=lambda rel: rel[4], reverse=True)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1098
1099
1100 def _generate_url(testbed, test_name):
1101     """Generate URL to a trending plot from the name of the test case.
1102
1103     :param testbed: The testbed used for testing.
1104     :param test_name: The name of the test case.
1105     :type testbed: str
1106     :type test_name: str
1107     :returns: The URL to the plot with the trending data for the given test
1108         case.
1109     :rtype str
1110     """
1111
1112     if u"x520" in test_name:
1113         nic = u"x520"
1114     elif u"x710" in test_name:
1115         nic = u"x710"
1116     elif u"xl710" in test_name:
1117         nic = u"xl710"
1118     elif u"xxv710" in test_name:
1119         nic = u"xxv710"
1120     elif u"vic1227" in test_name:
1121         nic = u"vic1227"
1122     elif u"vic1385" in test_name:
1123         nic = u"vic1385"
1124     elif u"x553" in test_name:
1125         nic = u"x553"
1126     elif u"cx556" in test_name or u"cx556a" in test_name:
1127         nic = u"cx556a"
1128     elif u"ena" in test_name:
1129         nic = u"nitro50g"
1130     else:
1131         nic = u""
1132
1133     if u"64b" in test_name:
1134         frame_size = u"64b"
1135     elif u"78b" in test_name:
1136         frame_size = u"78b"
1137     elif u"imix" in test_name:
1138         frame_size = u"imix"
1139     elif u"9000b" in test_name:
1140         frame_size = u"9000b"
1141     elif u"1518b" in test_name:
1142         frame_size = u"1518b"
1143     elif u"114b" in test_name:
1144         frame_size = u"114b"
1145     else:
1146         frame_size = u""
1147
1148     if u"1t1c" in test_name or \
1149         (u"-1c-" in test_name and
1150          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1151         cores = u"1t1c"
1152     elif u"2t2c" in test_name or \
1153          (u"-2c-" in test_name and
1154           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1155         cores = u"2t2c"
1156     elif u"4t4c" in test_name or \
1157          (u"-4c-" in test_name and
1158           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1159         cores = u"4t4c"
1160     elif u"2t1c" in test_name or \
1161          (u"-1c-" in test_name and
1162           testbed in
1163           (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1164            u"2n-aws", u"3n-aws")):
1165         cores = u"2t1c"
1166     elif u"4t2c" in test_name or \
1167          (u"-2c-" in test_name and
1168           testbed in
1169           (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1170            u"2n-aws", u"3n-aws")):
1171         cores = u"4t2c"
1172     elif u"8t4c" in test_name or \
1173          (u"-4c-" in test_name and
1174           testbed in
1175           (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1176            u"2n-aws", u"3n-aws")):
1177         cores = u"8t4c"
1178     else:
1179         cores = u""
1180
1181     if u"testpmd" in test_name:
1182         driver = u"testpmd"
1183     elif u"l3fwd" in test_name:
1184         driver = u"l3fwd"
1185     elif u"avf" in test_name:
1186         driver = u"avf"
1187     elif u"af-xdp" in test_name or u"af_xdp" in test_name:
1188         driver = u"af_xdp"
1189     elif u"rdma" in test_name:
1190         driver = u"rdma"
1191     elif u"dnv" in testbed or u"tsh" in testbed:
1192         driver = u"ixgbe"
1193     elif u"ena" in test_name:
1194         driver = u"ena"
1195     else:
1196         driver = u"dpdk"
1197
1198     if u"macip-iacl1s" in test_name:
1199         bsf = u"features-macip-iacl1"
1200     elif u"macip-iacl10s" in test_name:
1201         bsf = u"features-macip-iacl10"
1202     elif u"macip-iacl50s" in test_name:
1203         bsf = u"features-macip-iacl50"
1204     elif u"iacl1s" in test_name:
1205         bsf = u"features-iacl1"
1206     elif u"iacl10s" in test_name:
1207         bsf = u"features-iacl10"
1208     elif u"iacl50s" in test_name:
1209         bsf = u"features-iacl50"
1210     elif u"oacl1s" in test_name:
1211         bsf = u"features-oacl1"
1212     elif u"oacl10s" in test_name:
1213         bsf = u"features-oacl10"
1214     elif u"oacl50s" in test_name:
1215         bsf = u"features-oacl50"
1216     elif u"nat44det" in test_name:
1217         bsf = u"nat44det-bidir"
1218     elif u"nat44ed" in test_name and u"udir" in test_name:
1219         bsf = u"nat44ed-udir"
1220     elif u"-cps" in test_name and u"ethip4udp" in test_name:
1221         bsf = u"udp-cps"
1222     elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1223         bsf = u"tcp-cps"
1224     elif u"-pps" in test_name and u"ethip4udp" in test_name:
1225         bsf = u"udp-pps"
1226     elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1227         bsf = u"tcp-pps"
1228     elif u"-tput" in test_name and u"ethip4udp" in test_name:
1229         bsf = u"udp-tput"
1230     elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1231         bsf = u"tcp-tput"
1232     elif u"udpsrcscale" in test_name:
1233         bsf = u"features-udp"
1234     elif u"iacl" in test_name:
1235         bsf = u"features"
1236     elif u"policer" in test_name:
1237         bsf = u"features"
1238     elif u"adl" in test_name:
1239         bsf = u"features"
1240     elif u"cop" in test_name:
1241         bsf = u"features"
1242     elif u"nat" in test_name:
1243         bsf = u"features"
1244     elif u"macip" in test_name:
1245         bsf = u"features"
1246     elif u"scale" in test_name:
1247         bsf = u"scale"
1248     elif u"base" in test_name:
1249         bsf = u"base"
1250     else:
1251         bsf = u"base"
1252
1253     if u"114b" in test_name and u"vhost" in test_name:
1254         domain = u"vts"
1255     elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1256         domain = u"nat44"
1257         if u"nat44det" in test_name:
1258             domain += u"-det-bidir"
1259         else:
1260             domain += u"-ed"
1261         if u"udir" in test_name:
1262             domain += u"-unidir"
1263         elif u"-ethip4udp-" in test_name:
1264             domain += u"-udp"
1265         elif u"-ethip4tcp-" in test_name:
1266             domain += u"-tcp"
1267         if u"-cps" in test_name:
1268             domain += u"-cps"
1269         elif u"-pps" in test_name:
1270             domain += u"-pps"
1271         elif u"-tput" in test_name:
1272             domain += u"-tput"
1273     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1274         domain = u"dpdk"
1275     elif u"memif" in test_name:
1276         domain = u"container_memif"
1277     elif u"srv6" in test_name:
1278         domain = u"srv6"
1279     elif u"vhost" in test_name:
1280         domain = u"vhost"
1281         if u"vppl2xc" in test_name:
1282             driver += u"-vpp"
1283         else:
1284             driver += u"-testpmd"
1285         if u"lbvpplacp" in test_name:
1286             bsf += u"-link-bonding"
1287     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1288         domain = u"nf_service_density_vnfc"
1289     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1290         domain = u"nf_service_density_cnfc"
1291     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1292         domain = u"nf_service_density_cnfp"
1293     elif u"ipsec" in test_name:
1294         domain = u"ipsec"
1295         if u"sw" in test_name:
1296             bsf += u"-sw"
1297         elif u"hw" in test_name:
1298             bsf += u"-hw"
1299         elif u"spe" in test_name:
1300             bsf += u"-spe"
1301     elif u"ethip4vxlan" in test_name:
1302         domain = u"ip4_tunnels"
1303     elif u"ethip4udpgeneve" in test_name:
1304         domain = u"ip4_tunnels"
1305     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1306         domain = u"ip4"
1307     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1308         domain = u"ip6"
1309     elif u"l2xcbase" in test_name or \
1310             u"l2xcscale" in test_name or \
1311             u"l2bdbasemaclrn" in test_name or \
1312             u"l2bdscale" in test_name or \
1313             u"l2patch" in test_name:
1314         domain = u"l2"
1315     else:
1316         domain = u""
1317
1318     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1319     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1320
1321     return file_name + anchor_name
1322
1323
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the csv dashboard previously produced for this testbed, renders
    it as an HTML table (regressions/progressions color-coded, test names
    linked to trending plots) and writes it as an rST ``.. raw:: html``
    block to the output file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    # The data comes from the csv input file, not from input_data.
    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # NDR/PDR dashboards link into the ndrpdr trending pages (with a
    # -ndr/-pdr anchor suffix); MRR links into the plain trending pages.
    if test_type in (u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = f"-{test_type.lower()}"
    else:
        lnk_dir = u"../trending/"
        lnk_sufix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except FileNotFoundError as err:
        logging.warning(f"{err}")
        return
    except KeyError:
        # table[u"input-file"] missing from the specification.
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header (first csv row); first column left-aligned, rest centered:
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows:
    # Two shades per category to alternate row background colors.
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 are the regressions/progressions counters from
        # the csv dashboard; regressions take precedence for coloring.
        if int(row[4]):
            color = u"regression"
        elif int(row[5]):
            color = u"progression"
        else:
            color = u"normal"
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name: the first column becomes a link to the trending plot
            # (unless links are disabled in the specification).
            if c_idx == 0 and table.get(u"add-links", True):
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), item)}"
                        f"{lnk_sufix}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        # table[u"output-file"] missing from the specification.
        logging.warning(u"The output file is not defined.")
        return
1443
1444
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each listed build, writes the build id, version, pass/fail counts,
    duration and one line per failed test (name###message) to a text file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    out_lines = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                metadata = input_data.metadata(job, build)
                version = metadata.get(u"version", u"")
                duration = metadata.get(u"elapsedtime", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            out_lines.append(build)
            out_lines.append(version)
            failed_tests = list()
            nr_passed = 0
            nr_failed = 0
            # NOTE(review): attribute access without parentheses; presumably
            # data[job][build] is a pandas object here — confirm.
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    nr_passed += 1
                    continue
                nr_failed += 1
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                if not groups:
                    continue
                nic = groups.group(0)
                # Anonymize IPv4 addresses and drop teardown noise from
                # the failure message.
                msg = re.sub(
                    r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
                    'xxx.xxx.xxx.xxx',
                    tst_data[u'msg'].replace(u"\n", u"")
                ).split(u'Also teardown failed')[0]
                failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
            out_lines.append(nr_passed)
            out_lines.append(nr_failed)
            out_lines.append(duration)
            out_lines.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(f"{item}\n" for item in out_lines)
1512
1513
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Collects test statuses from the builds generated within the configured
    time window, counts failures per test and writes a csv table (sorted by
    failure count, then by last failure time) plus its pretty txt rendering.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # The test type is derived from the filter in the specification.
    test_type = u"NDRPDR" if u"NDRPDR" in table.get(u"filter", list()) \
        else u"MRR"

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification. Only builds generated within the time window count.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    ignored = table.get(u"ignore-list", list())
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in ignored:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    tbl_dict[tst_name] = {
                        u"name": f"{groups.group(0)}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).get(
                        u"generated", u""
                    )
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(
                                u"version", u""
                            ),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        # Keep only failing runs; the newest one supplies the "last
        # failure" columns.
        fail_rows = [
            val for val in tst_data[u"data"].values() if val[0] == u"FAIL"
        ]
        if not fail_rows:
            continue
        fails_nr = len(fail_rows)
        _, fails_last_date, fails_last_vpp, fails_last_csit = fail_rows[-1]
        max_fails = max(max_fails, fails_nr)
        job_prefix = u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'
        tbl_lst.append([
            tst_data[u"name"],
            fails_nr,
            fails_last_date,
            fails_last_vpp,
            f"{job_prefix}-build-{fails_last_csit}"
        ])

    # Newest failures first within each failure-count group.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_sorted.extend(item for item in tbl_lst if item[1] == nrf)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join(str(item) for item in test) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1624
1625
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Read the csv file produced by table_failed_tests and render it as an
    html table wrapped in an rST ``raw:: html`` directive. Items in the
    first column are optionally turned into links to the corresponding
    trending pages.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # Not used, kept only to match the generator dispatch interface.
    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
        logging.error(
            f"Test type {test_type} is not defined. "
            f"Skipping."
        )
        return

    # Links point either to the ndrpdr trending pages (with a "-pdr"
    # suffix) or to the mrr trending pages.
    if test_type in (u"NDRPDR", u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_suffix = u"-pdr"
    else:
        lnk_dir = u"../trending/"
        lnk_suffix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # An empty input file would make the header access below fail.
    if not csv_lst:
        logging.warning(
            f"No data in the input file {table[u'input-file']}. Skipping."
        )
        return

    # Table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header (first csv row):
    trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows with alternating background colors:
    colors = (u"#e9f1fb", u"#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        trow = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=background)
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # The test name (first column) becomes a link to its
            # trending page, unless links are disabled in the spec:
            if c_idx == 0 and table.get(u"add-links", True):
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), item)}"
                        f"{lnk_suffix}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        # Explicit encoding for consistency with the other file writers
        # in this module.
        with open(table[u"output-file"], u'w', encoding='utf-8') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            # ET.tostring with encoding="unicode" already returns str.
            html_file.write(ET.tostring(failed_tests, encoding=u"unicode"))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1723
1724
def table_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_comparison
    specified in the specification file.

    Build a comparison table with one data column per item of the
    "columns" list in the specification, compute relative differences
    for each item of the "comparisons" list and optionally attach Root
    Cause Analysis (RCA) annotations loaded from yaml files. The result
    is written as csv, txt and html files derived from
    table["output-file"].

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f"  Generating the table {table.get('title', '')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get('type', '')} "
        f"{table.get('title', '')}."
    )

    columns = table.get("columns", None)
    if not columns:
        logging.error(
            f"No columns specified for {table.get('title', '')}. Skipping."
        )
        return

    # Gather the data for each specified column.
    cols = list()
    for idx, col in enumerate(columns):
        if col.get("data-set", None) is None:
            logging.warning(f"No data for column {col.get('title', '')}")
            continue
        # Optional tag filter: only tests carrying this tag are included.
        tag = col.get("tag", None)
        data = input_data.filter_data(
            table,
            params=[
                "throughput",
                "result",
                "latency",
                "name",
                "parent",
                "tags"
            ],
            data=col["data-set"],
            continue_on_error=True
        )
        col_data = {
            "title": col.get("title", f"Column{idx}"),
            "data": dict()
        }
        for builds in data.values:
            for build in builds:
                for tst_name, tst_data in build.items():
                    if tag and tag not in tst_data["tags"]:
                        continue
                    # Normalized test name (NIC ignored, "2n1l-" prefix
                    # stripped) so the same test from different
                    # testbeds/topologies maps to one table row.
                    tst_name_mod = \
                        _tpc_modify_test_name(tst_name, ignore_nic=True).\
                        replace("2n1l-", "")
                    if col_data["data"].get(tst_name_mod, None) is None:
                        name = tst_data['name'].rsplit('-', 1)[0]
                        if "across testbeds" in table["title"].lower() or \
                                "across topologies" in table["title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        col_data["data"][tst_name_mod] = {
                            "name": name,
                            # "replace" marks data that may be thrown
                            # away if a replacement data set (below)
                            # provides this test as well.
                            "replace": True,
                            "data": list(),
                            "mean": None,
                            "stdev": None
                        }
                    _tpc_insert_data(
                        target=col_data["data"][tst_name_mod],
                        src=tst_data,
                        include_tests=table["include-tests"]
                    )

        # An optional replacement data set overrides the data collected
        # above: on the first replacement hit for a test, the data
        # gathered so far is dropped (see the "replace" flag) and the
        # replacement samples are collected instead.
        replacement = col.get("data-replacement", None)
        if replacement:
            rpl_data = input_data.filter_data(
                table,
                params=[
                    "throughput",
                    "result",
                    "latency",
                    "name",
                    "parent",
                    "tags"
                ],
                data=replacement,
                continue_on_error=True
            )
            for builds in rpl_data.values:
                for build in builds:
                    for tst_name, tst_data in build.items():
                        if tag and tag not in tst_data["tags"]:
                            continue
                        tst_name_mod = \
                            _tpc_modify_test_name(tst_name, ignore_nic=True).\
                            replace("2n1l-", "")
                        if col_data["data"].get(tst_name_mod, None) is None:
                            name = tst_data['name'].rsplit('-', 1)[0]
                            if "across testbeds" in table["title"].lower() \
                                    or "across topologies" in \
                                    table["title"].lower():
                                name = _tpc_modify_displayed_test_name(name)
                            col_data["data"][tst_name_mod] = {
                                "name": name,
                                "replace": False,
                                "data": list(),
                                "mean": None,
                                "stdev": None
                            }
                        if col_data["data"][tst_name_mod]["replace"]:
                            col_data["data"][tst_name_mod]["replace"] = False
                            col_data["data"][tst_name_mod]["data"] = list()
                        _tpc_insert_data(
                            target=col_data["data"][tst_name_mod],
                            src=tst_data,
                            include_tests=table["include-tests"]
                        )

        # Aggregate the collected samples to mean and standard
        # deviation for these test types.
        if table["include-tests"] in ("NDR", "PDR", "hoststack", "vsap") \
                or "latency" in table["include-tests"]:
            for tst_name, tst_data in col_data["data"].items():
                if tst_data["data"]:
                    tst_data["mean"] = mean(tst_data["data"])
                    tst_data["stdev"] = stdev(tst_data["data"])

        cols.append(col_data)

    # Pivot: one dict entry per test, one sub-entry per column title.
    tbl_dict = dict()
    for col in cols:
        for tst_name, tst_data in col["data"].items():
            if tbl_dict.get(tst_name, None) is None:
                tbl_dict[tst_name] = {
                    "name": tst_data["name"]
                }
            tbl_dict[tst_name][col["title"]] = {
                "mean": tst_data["mean"],
                "stdev": tst_data["stdev"]
            }

    if not tbl_dict:
        logging.warning(f"No data for table {table.get('title', '')}!")
        return

    # One row per test: [name, col1-dict-or-None, col2-dict-or-None, ...]
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        row = [tst_data[u"name"], ]
        for col in cols:
            row.append(tst_data.get(col[u"title"], None))
        tbl_lst.append(row)

    # Validate the comparisons and load the matching RCA yaml files
    # (rcas keeps one entry - dict or None - per comparison).
    comparisons = table.get("comparisons", None)
    rcas = list()
    if comparisons and isinstance(comparisons, list):
        for idx, comp in enumerate(comparisons):
            try:
                col_ref = int(comp["reference"])
                col_cmp = int(comp["compare"])
            except KeyError:
                logging.warning("Comparison: No references defined! Skipping.")
                # NOTE(review): popping from the list while enumerating
                # it shifts the remaining indices, so the comparison
                # following a removed one is skipped - confirm intended.
                comparisons.pop(idx)
                continue
            # NOTE(review): due to the "or col_ref == col_cmp" term,
            # a comparison of a column with itself passes validation -
            # verify this is intended.
            if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
                    col_ref == col_cmp):
                logging.warning(f"Wrong values of reference={col_ref} "
                                f"and/or compare={col_cmp}. Skipping.")
                comparisons.pop(idx)
                continue
            rca_file_name = comp.get("rca-file", None)
            if rca_file_name:
                try:
                    with open(rca_file_name, "r") as file_handler:
                        rcas.append(
                            {
                                "title": f"RCA{idx + 1}",
                                "data": load(file_handler, Loader=FullLoader)
                            }
                        )
                except (YAMLError, IOError) as err:
                    logging.warning(
                        f"The RCA file {rca_file_name} does not exist or "
                        f"it is corrupted!"
                    )
                    logging.debug(repr(err))
                    rcas.append(None)
            else:
                rcas.append(None)
    else:
        # NOTE(review): comparisons set to None here is iterated
        # unconditionally later when building header_csv
        # ("for comp in comparisons") which would raise TypeError -
        # presumably the specification always defines comparisons.
        comparisons = None

    # Compute the relative differences. Spec column indices are 1-based
    # and row[0] is the test name, so row[i] is spec column i.
    tbl_cmp_lst = list()
    if comparisons:
        for row in tbl_lst:
            new_row = deepcopy(row)
            for comp in comparisons:
                ref_itm = row[int(comp["reference"])]
                if ref_itm is None and \
                        comp.get("reference-alt", None) is not None:
                    ref_itm = row[int(comp["reference-alt"])]
                cmp_itm = row[int(comp[u"compare"])]
                if ref_itm is not None and cmp_itm is not None and \
                        ref_itm["mean"] is not None and \
                        cmp_itm["mean"] is not None and \
                        ref_itm["stdev"] is not None and \
                        cmp_itm["stdev"] is not None:
                    # Normalization factors from the spec, keyed by
                    # "norm-ref"/"norm-cmp"; default 1.0 (no scaling).
                    norm_factor_ref = table["norm_factor"].get(
                        comp.get("norm-ref", ""),
                        1.0
                    )
                    norm_factor_cmp = table["norm_factor"].get(
                        comp.get("norm-cmp", ""),
                        1.0
                    )
                    try:
                        delta, d_stdev = relative_change_stdev(
                            ref_itm["mean"] * norm_factor_ref,
                            cmp_itm["mean"] * norm_factor_cmp,
                            ref_itm["stdev"] * norm_factor_ref,
                            cmp_itm["stdev"] * norm_factor_cmp
                        )
                    except ZeroDivisionError:
                        break
                    if delta is None or math.isnan(delta):
                        break
                    # Deltas (percent) are scaled by 1e6 so the common
                    # "/ 1e6" used for the throughput columns below
                    # renders them back as percent.
                    new_row.append({
                        "mean": delta * 1e6,
                        "stdev": d_stdev * 1e6
                    })
                else:
                    break
            else:
                # for-else: keep only rows where ALL comparisons were
                # computed; any break above drops the row.
                tbl_cmp_lst.append(new_row)

    # Stable sorts: alphabetically by name, then by the last comparison
    # mean, descending.
    try:
        tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
        tbl_cmp_lst.sort(key=lambda rel: rel[-1]['mean'], reverse=True)
    except TypeError as err:
        logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")

    # Flatten to csv rows: each dict becomes two cells (mean, stdev) in
    # Mpps (throughput) resp. percent (deltas); missing data is "NT"
    # (Not Tested).
    tbl_for_csv = list()
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or\
                    itm.get('mean', None) is None or \
                    itm.get('stdev', None) is None:
                row.append("NT")
                row.append("NT")
            else:
                row.append(round(float(itm['mean']) / 1e6, 3))
                row.append(round(float(itm['stdev']) / 1e6, 3))
        # Append the RCA tag (e.g. "[1]") for each comparison with an
        # RCA file, keyed by the test name.
        for rca in rcas:
            if rca is None:
                continue
            rca_nr = rca["data"].get(row[0], "-")
            row.append(f"[{rca_nr}]" if rca_nr != "-" else "-")
        tbl_for_csv.append(row)

    header_csv = ["Test Case", ]
    for col in cols:
        header_csv.append(f"Avg({col['title']})")
        header_csv.append(f"Stdev({col['title']})")
    for comp in comparisons:
        header_csv.append(
            f"Avg({comp.get('title', '')})"
        )
        header_csv.append(
            f"Stdev({comp.get('title', '')})"
        )
    for rca in rcas:
        if rca:
            header_csv.append(rca["title"])

    legend_lst = table.get("legend", None)
    if legend_lst is None:
        legend = ""
    else:
        legend = "\n" + "\n".join(legend_lst) + "\n"

    footnote = ""
    if rcas and any(rcas):
        footnote += "\nRoot Cause Analysis:\n"
        for rca in rcas:
            if rca:
                footnote += f"{rca['data'].get('footnote', '')}\n"

    # Full csv output (mean and stdev in separate cells) with legend
    # and footnote appended as quoted lines.
    csv_file_name = f"{table['output-file']}-csv.csv"
    with open(csv_file_name, "wt", encoding='utf-8') as file_handler:
        file_handler.write(
            ",".join([f'"{itm}"' for itm in header_csv]) + "\n"
        )
        for test in tbl_for_csv:
            file_handler.write(
                ",".join([f'"{item}"' for item in test]) + "\n"
            )
        if legend_lst:
            for item in legend_lst:
                file_handler.write(f'"{item}"\n')
        if footnote:
            for itm in footnote.split("\n"):
                file_handler.write(f'"{itm}"\n')

    # Compact representation for txt/html: "mean ±stdev" in one cell.
    # max_lens tracks the widest "±stdev" part per column for the
    # right-alignment pass below.
    # NOTE(review): raises IndexError when tbl_cmp_lst is empty -
    # confirm upstream guarantees at least one row.
    tbl_tmp = list()
    max_lens = [0, ] * len(tbl_cmp_lst[0])
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or \
                    itm.get('mean', None) is None or \
                    itm.get('stdev', None) is None:
                new_itm = "NT"
            else:
                if idx < len(cols):
                    # Data column: plain value.
                    new_itm = (
                        f"{round(float(itm['mean']) / 1e6, 2)} "
                        f"\u00B1{round(float(itm['stdev']) / 1e6, 2)}".
                        replace("nan", "NaN")
                    )
                else:
                    # Diff column: value with a forced sign.
                    new_itm = (
                        f"{round(float(itm['mean']) / 1e6, 2):+} "
                        f"\u00B1{round(float(itm['stdev']) / 1e6, 2)}".
                        replace("nan", "NaN")
                    )
            if len(new_itm.rsplit(" ", 1)[-1]) > max_lens[idx]:
                max_lens[idx] = len(new_itm.rsplit(" ", 1)[-1])
            row.append(new_itm)

        tbl_tmp.append(row)

    header = ["Test Case", ]
    header.extend([col["title"] for col in cols])
    header.extend([comp.get("title", "") for comp in comparisons])

    # Align the "±stdev" parts per column and prepend the RCA tags to
    # the diff columns, padded to the header width (min 19+1 chars).
    tbl_final = list()
    for line in tbl_tmp:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm in ("NT", "NaN"):
                row.append(itm)
                continue
            itm_lst = itm.rsplit("\u00B1", 1)
            itm_lst[-1] = \
                f"{' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
            itm_str = "\u00B1".join(itm_lst)

            if idx >= len(cols):
                # Diffs
                rca = rcas[idx - len(cols)]
                if rca:
                    # Add rcas to diffs
                    rca_nr = rca["data"].get(row[0], None)
                    if rca_nr:
                        hdr_len = len(header[idx + 1]) - 1
                        if hdr_len < 19:
                            hdr_len = 19
                        rca_nr = f"[{rca_nr}]"
                        itm_str = (
                            f"{' ' * (4 - len(rca_nr))}{rca_nr}"
                            f"{' ' * (hdr_len - 4 - len(itm_str))}"
                            f"{itm_str}"
                        )
            row.append(itm_str)
        tbl_final.append(row)

    # Generate csv tables:
    csv_file_name = f"{table['output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, "wt", encoding='utf-8') as file_handler:
        # Semicolon delimiter: the cell values contain commas-free text
        # but also the "±" compact form with spaces.
        file_handler.write(";".join(header) + "\n")
        for test in tbl_final:
            file_handler.write(";".join([str(item) for item in test]) + "\n")

    # Generate txt table:
    txt_file_name = f"{table['output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=";")

    with open(txt_file_name, 'a', encoding='utf-8') as file_handler:
        file_handler.write(legend)
        file_handler.write(footnote)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_final,
        table['output-file'],
        legend=legend,
        footnote=footnote,
        sort_data=False,
        title=table.get("title", "")
    )
2116
2117
def table_weekly_comparison(table, in_data):
    """Generate the table(s) with algorithm: table_weekly_comparison
    specified in the specification file.

    Build a table with one data column per processed build (up to
    "nr-of-data-columns"), a four-row header (VPP version, timestamp,
    CSIT build, testbed) and relative-change columns for each item of
    the "comparisons" list. The result is written as csv, txt and html
    files derived from table["output-file"].

    :param table: Table to generate.
    :param in_data: Data to process.
    :type table: pandas.Series
    :type in_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    incl_tests = table.get(u"include-tests", None)
    if incl_tests not in (u"NDR", u"PDR"):
        logging.error(f"Wrong tests to include specified ({incl_tests}).")
        return

    nr_cols = table.get(u"nr-of-data-columns", None)
    if not nr_cols or nr_cols < 2:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    data = in_data.filter_data(
        table,
        params=[u"throughput", u"result", u"name", u"parent", u"tags"],
        continue_on_error=True
    )

    # Four header rows; build metadata is inserted at position 1 below,
    # so the most recently processed build ends up leftmost.
    header = [
        [u"VPP Version", ],
        [u"Start Timestamp", ],
        [u"CSIT Build", ],
        [u"CSIT Testbed", ]
    ]
    tbl_dict = dict()
    # idx counts the data columns filled so far (shared across jobs).
    idx = 0
    # Optional mapping of testbed IP to a human-readable testbed name.
    tb_tbl = table.get(u"testbeds", None)
    for job_name, job_data in data.items():
        for build_nr, build in job_data.items():
            if idx >= nr_cols:
                break
            if build.empty:
                continue

            tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            else:
                testbed = u""
            header[2].insert(1, build_nr)
            header[3].insert(1, testbed)
            header[1].insert(
                1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
            )
            header[0].insert(
                1, in_data.metadata(job_name, build_nr).get(u"version", u"")
            )

            for tst_name, tst_data in build.items():
                tst_name_mod = \
                    _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
                if not tbl_dict.get(tst_name_mod, None):
                    tbl_dict[tst_name_mod] = dict(
                        name=tst_data[u'name'].rsplit(u'-', 1)[0],
                    )
                try:
                    # Column data is stored under negative keys
                    # (-1, -2, ...) so the "comparisons" spec can
                    # reference columns by negative index.
                    tbl_dict[tst_name_mod][-idx - 1] = \
                        tst_data[u"throughput"][incl_tests][u"LOWER"]
                except (TypeError, IndexError, KeyError, ValueError):
                    # Best effort: a build may simply lack this result.
                    pass
            idx += 1

    if idx < nr_cols:
        logging.error(u"Not enough data to build the table! Skipping")
        return

    # Compute the relative changes for each comparison; one value per
    # test per comparison, float("nan") when data is missing.
    cmp_dict = dict()
    # NOTE: "cmp" shadows the (Python 2) builtin name; harmless here.
    for idx, cmp in enumerate(table.get(u"comparisons", list())):
        idx_ref = cmp.get(u"reference", None)
        idx_cmp = cmp.get(u"compare", None)
        if idx_ref is None or idx_cmp is None:
            continue
        # Subtracting idx compensates for the Diff titles already
        # appended to header[0] in previous iterations (the spec
        # presumably uses negative column indices - confirm).
        header[0].append(
            f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
            f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
        )
        header[1].append(u"")
        header[2].append(u"")
        header[3].append(u"")
        for tst_name, tst_data in tbl_dict.items():
            if not cmp_dict.get(tst_name, None):
                cmp_dict[tst_name] = list()
            ref_data = tst_data.get(idx_ref, None)
            cmp_data = tst_data.get(idx_cmp, None)
            if ref_data is None or cmp_data is None:
                cmp_dict[tst_name].append(float(u'nan'))
            else:
                cmp_dict[tst_name].append(
                    relative_change(ref_data, cmp_data)
                )

    # Assemble the rows; rows whose last comparison is missing are kept
    # separately and appended at the end of the table.
    tbl_lst_none = list()
    tbl_lst = list()
    for tst_name, tst_data in tbl_dict.items():
        itm_lst = [tst_data[u"name"], ]
        for idx in range(nr_cols):
            item = tst_data.get(-idx - 1, None)
            if item is None:
                itm_lst.insert(1, None)
            else:
                # Convert pps to Mpps; insert(1, ...) reverses the
                # iteration order of the data columns.
                itm_lst.insert(1, round(item / 1e6, 1))
        itm_lst.extend(
            [
                None if itm is None else round(itm, 1)
                for itm in cmp_dict[tst_name]
            ]
        )
        if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
            tbl_lst_none.append(itm_lst)
        else:
            tbl_lst.append(itm_lst)

    # Stable sorts: by name, then by the last comparison value.
    tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_lst.extend(tbl_lst_none)

    # Generate csv table:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        for hdr in header:
            file_handler.write(u",".join(hdr) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join(
                [
                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
                    replace(u"null", u"-") for item in test
                ]
            ) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")

    # Reorganize header in txt table
    txt_table = list()
    with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
        for line in list(file_handler):
            txt_table.append(line)
    try:
        # Move the third line below the header separator; tolerate
        # tables too short for the reorder.
        txt_table.insert(5, txt_table.pop(2))
        with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
            file_handler.writelines(txt_table)
    except IndexError:
        pass

    # Generate html table:
    # Transpose the four header rows into one multi-line (<br>) header
    # cell per column.
    hdr_html = [
        u"<br>".join(row) for row in zip(*header)
    ]
    _tpc_generate_html_table(
        hdr_html,
        tbl_lst,
        table[u'output-file'],
        sort_data=True,
        title=table.get(u"title", u""),
        generate_rst=False
    )