report: indexed error message added to report emails
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import math
21 import re
22
23 from collections import OrderedDict
24 from xml.etree import ElementTree as ET
25 from datetime import datetime as dt
26 from datetime import timedelta
27 from copy import deepcopy
28
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
31 import pandas as pd
32
33 from numpy import nan, isnan
34 from yaml import load, FullLoader, YAMLError
35
36 from pal_utils import mean, stdev, classify_anomalies, \
37     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
38
39
# Matches the NIC identifier embedded in suite/test names, e.g. u"10ge2p1x710"
# (speed, port count and model).  Used to extract or strip the NIC part.
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
41
42
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table entry in the specification names an algorithm; the matching
    generator function is looked up in a dispatch table and invoked. A table
    whose algorithm is unknown or whose generator fails with a NameError is
    logged and skipped, so the remaining tables are still generated.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name (from the specification) -> generator.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            alg = table[u"algorithm"]
            if alg == u"table_weekly_comparison":
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            generate = generator[alg]
        except KeyError as err:
            # An unknown algorithm (or a spec entry without u"algorithm")
            # raises KeyError, not NameError; it must not abort the
            # generation of the remaining tables.
            logging.error(f"Algorithm not implemented: {repr(err)}")
            continue
        try:
            generate(table, data)
        except NameError as err:
            logging.error(
                f"Probably algorithm {alg} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
77
78
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    For every suite, writes one .rst file embedding raw HTML tables with the
    per-node runtime operational data (calls, vectors, suspends, clocks)
    taken from the u"telemetry-show-run" field of each test.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    # Keep only the fields needed to render the operational data.
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"telemetry-show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optional ordering of tests, driven by the u"sort" key in the spec.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Header row color, empty (spacer) row color and the two alternating
        # body row colors.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row: the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # Telemetry absent, or still a raw (unparsed) string -> emit a
        # "No Data" table and finish.
        if tst_data.get(u"telemetry-show-run", None) is None or \
                isinstance(tst_data[u"telemetry-show-run"], str):
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            # White dot: invisible content keeping the row from collapsing.
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT in the telemetry data.
        for dut_data in tst_data[u"telemetry-show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"runtime", None) is None:
                tcol.text = u"No Data"
                continue

            # Re-group the flat telemetry items into
            # runtime[thread_id][graph_node][metric_name] = value.
            runtime = dict()
            for item in dut_data[u"runtime"].get(u"data", tuple()):
                tid = int(item[u"labels"][u"thread_id"])
                if runtime.get(tid, None) is None:
                    runtime[tid] = dict()
                gnode = item[u"labels"][u"graph_node"]
                if runtime[tid].get(gnode, None) is None:
                    runtime[tid][gnode] = dict()
                try:
                    runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
                except ValueError:
                    # Non-numeric metric values are kept as strings.
                    runtime[tid][gnode][item[u"name"]] = item[u"value"]

            # Build per-thread rows:
            # [node, calls, vectors, suspends, clocks, vectors_call].
            threads = dict({idx: list() for idx in range(len(runtime))})
            for idx, run_data in runtime.items():
                for gnode, gdata in run_data.items():
                    # Normalize clocks by the first non-zero denominator in
                    # order of preference: vectors, calls, suspends.
                    if gdata[u"vectors"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"vectors"]
                    elif gdata[u"calls"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"calls"]
                    elif gdata[u"suspends"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"suspends"]
                    else:
                        clocks = 0.0
                    if gdata[u"calls"] > 0:
                        vectors_call = gdata[u"vectors"] / gdata[u"calls"]
                    else:
                        vectors_call = 0.0
                    # Skip graph nodes with no activity at all.
                    if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
                            int(gdata[u"suspends"]):
                        threads[idx].append([
                            gnode,
                            int(gdata[u"calls"]),
                            int(gdata[u"vectors"]),
                            int(gdata[u"suspends"]),
                            clocks,
                            vectors_call
                        ])

            # DUT identification row.
            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            # One sub-table per thread: title row, column headers, data rows.
            for thread_nr, thread in threads.items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                # Thread 0 is the main thread, the rest are workers.
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    # First column left-aligned, the rest right-aligned.
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        # Floats (clocks, vectors_call) rendered with two
                        # decimal places.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Closing spacer row (white dot keeps the row height).
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # One output file per suite, concatenating the tables of all tests whose
    # parent matches the suite name.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            # No u"output-file" in the spec; nothing more can be written.
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
321
322
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Writes one CSV file per suite, with one row per passed test and the
    columns defined by the u"columns" list in the specification.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional ordering of tests, driven by the u"sort" key in the spec.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables.  Inner double quotes are doubled to
    # keep the CSV valid.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            # Only passed tests belonging to this suite are included.
            if data[test][u"status"] != u"PASS" or \
                    data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # The column spec u"data" is e.g. u"data msg"; the second
                    # word selects the field of the test data to render.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    # (the `continue` shortens row_lst, so the length check
                    # below drops the whole row).
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Wrap long test names onto two lines, splitting at
                        # the middle dash; |br| is an rst line break.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u"\n", 1)[1]
                            except IndexError:
                                # Single-line message: nothing to strip.
                                pass
                        col_data = col_data.replace(u'\n', u' |br| ').\
                            replace(u'\r', u'').replace(u'"', u"'")
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
                        col_data = col_data.replace(u'\n', u' |br| ')
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # Keep only complete rows (see the "Test Failed" skip above).
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
419
420
421 def _tpc_modify_test_name(test_name, ignore_nic=False):
422     """Modify a test name by replacing its parts.
423
424     :param test_name: Test name to be modified.
425     :param ignore_nic: If True, NIC is removed from TC name.
426     :type test_name: str
427     :type ignore_nic: bool
428     :returns: Modified test name.
429     :rtype: str
430     """
431     test_name_mod = test_name.\
432         replace(u"-ndrpdr", u"").\
433         replace(u"1t1c", u"1c").\
434         replace(u"2t1c", u"1c"). \
435         replace(u"2t2c", u"2c").\
436         replace(u"4t2c", u"2c"). \
437         replace(u"4t4c", u"4c").\
438         replace(u"8t4c", u"4c")
439
440     if ignore_nic:
441         return re.sub(REGEX_NIC, u"", test_name_mod)
442     return test_name_mod
443
444
445 def _tpc_modify_displayed_test_name(test_name):
446     """Modify a test name which is displayed in a table by replacing its parts.
447
448     :param test_name: Test name to be modified.
449     :type test_name: str
450     :returns: Modified test name.
451     :rtype: str
452     """
453     return test_name.\
454         replace(u"1t1c", u"1c").\
455         replace(u"2t1c", u"1c"). \
456         replace(u"2t2c", u"2c").\
457         replace(u"4t2c", u"2c"). \
458         replace(u"4t4c", u"4c").\
459         replace(u"8t4c", u"4c")
460
461
462 def _tpc_insert_data(target, src, include_tests):
463     """Insert src data to the target structure.
464
465     :param target: Target structure where the data is placed.
466     :param src: Source data to be placed into the target structure.
467     :param include_tests: Which results will be included (MRR, NDR, PDR).
468     :type target: list
469     :type src: dict
470     :type include_tests: str
471     """
472     try:
473         if include_tests == u"MRR":
474             target[u"mean"] = src[u"result"][u"receive-rate"]
475             target[u"stdev"] = src[u"result"][u"receive-stdev"]
476         elif include_tests == u"PDR":
477             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
478         elif include_tests == u"NDR":
479             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
480         elif u"latency" in include_tests:
481             keys = include_tests.split(u"-")
482             if len(keys) == 4:
483                 lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
484                 target[u"data"].append(
485                     float(u"nan") if lat == -1 else lat * 1e6
486                 )
487     except (KeyError, TypeError):
488         pass
489
490
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    # Position of the u"Test Case" column; it also indexes the
    # alignment/width presets below, so it is expected to be small (0..3).
    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        # One pre-sorted view per column, first all ascending, then all
        # descending, always with u"Test Case" as the secondary sort key.
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    # Alternating body row background colors.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        # One trace per pre-sorted view; the dropdown menu below toggles
        # which single trace is visible.
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        # NOTE: this loop reuses `idx`; harmless, as the header index is not
        # needed again in this branch.
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    # Initially show the last view (last column, descending).
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        # No sorting requested: a single table trace with the data as given.
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    # Wrap the generated html in an .rst file which embeds it via an iframe.
    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )

        if legend:
            try:
                # Convert the multi-line legend into an rst bullet list.
                itm_lst = legend[1:-2].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
        if footnote:
            try:
                # Same bullet-list conversion for the footnote.
                itm_lst = footnote[1:].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")
677
678
679 def table_soak_vs_ndr(table, input_data):
680     """Generate the table(s) with algorithm: table_soak_vs_ndr
681     specified in the specification file.
682
683     :param table: Table to generate.
684     :param input_data: Data to process.
685     :type table: pandas.Series
686     :type input_data: InputData
687     """
688
689     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
690
691     # Transform the data
692     logging.info(
693         f"    Creating the data set for the {table.get(u'type', u'')} "
694         f"{table.get(u'title', u'')}."
695     )
696     data = input_data.filter_data(table, continue_on_error=True)
697
698     # Prepare the header of the table
699     try:
700         header = [
701             u"Test Case",
702             f"Avg({table[u'reference'][u'title']})",
703             f"Stdev({table[u'reference'][u'title']})",
704             f"Avg({table[u'compare'][u'title']})",
705             f"Stdev{table[u'compare'][u'title']})",
706             u"Diff",
707             u"Stdev(Diff)"
708         ]
709         header_str = u";".join(header) + u"\n"
710         legend = (
711             u"\nLegend:\n"
712             f"Avg({table[u'reference'][u'title']}): "
713             f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
714             f"from a series of runs of the listed tests.\n"
715             f"Stdev({table[u'reference'][u'title']}): "
716             f"Standard deviation value of {table[u'reference'][u'title']} "
717             f"[Mpps] computed from a series of runs of the listed tests.\n"
718             f"Avg({table[u'compare'][u'title']}): "
719             f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
720             f"a series of runs of the listed tests.\n"
721             f"Stdev({table[u'compare'][u'title']}): "
722             f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
723             f"computed from a series of runs of the listed tests.\n"
724             f"Diff({table[u'reference'][u'title']},"
725             f"{table[u'compare'][u'title']}): "
726             f"Percentage change calculated for mean values.\n"
727             u"Stdev(Diff): "
728             u"Standard deviation of percentage change calculated for mean "
729             u"values."
730         )
731     except (AttributeError, KeyError) as err:
732         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
733         return
734
735     # Create a list of available SOAK test results:
736     tbl_dict = dict()
737     for job, builds in table[u"compare"][u"data"].items():
738         for build in builds:
739             for tst_name, tst_data in data[job][str(build)].items():
740                 if tst_data[u"type"] == u"SOAK":
741                     tst_name_mod = tst_name.replace(u"-soak", u"")
742                     if tbl_dict.get(tst_name_mod, None) is None:
743                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
744                         nic = groups.group(0) if groups else u""
745                         name = (
746                             f"{nic}-"
747                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
748                         )
749                         tbl_dict[tst_name_mod] = {
750                             u"name": name,
751                             u"ref-data": list(),
752                             u"cmp-data": list()
753                         }
754                     try:
755                         tbl_dict[tst_name_mod][u"cmp-data"].append(
756                             tst_data[u"throughput"][u"LOWER"])
757                     except (KeyError, TypeError):
758                         pass
759     tests_lst = tbl_dict.keys()
760
761     # Add corresponding NDR test results:
762     for job, builds in table[u"reference"][u"data"].items():
763         for build in builds:
764             for tst_name, tst_data in data[job][str(build)].items():
765                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
766                     replace(u"-mrr", u"")
767                 if tst_name_mod not in tests_lst:
768                     continue
769                 try:
770                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
771                         continue
772                     if table[u"include-tests"] == u"MRR":
773                         result = (tst_data[u"result"][u"receive-rate"],
774                                   tst_data[u"result"][u"receive-stdev"])
775                     elif table[u"include-tests"] == u"PDR":
776                         result = \
777                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
778                     elif table[u"include-tests"] == u"NDR":
779                         result = \
780                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
781                     else:
782                         result = None
783                     if result is not None:
784                         tbl_dict[tst_name_mod][u"ref-data"].append(
785                             result)
786                 except (KeyError, TypeError):
787                     continue
788
789     tbl_lst = list()
790     for tst_name in tbl_dict:
791         item = [tbl_dict[tst_name][u"name"], ]
792         data_r = tbl_dict[tst_name][u"ref-data"]
793         if data_r:
794             if table[u"include-tests"] == u"MRR":
795                 data_r_mean = data_r[0][0]
796                 data_r_stdev = data_r[0][1]
797             else:
798                 data_r_mean = mean(data_r)
799                 data_r_stdev = stdev(data_r)
800             item.append(round(data_r_mean / 1e6, 1))
801             item.append(round(data_r_stdev / 1e6, 1))
802         else:
803             data_r_mean = None
804             data_r_stdev = None
805             item.extend([None, None])
806         data_c = tbl_dict[tst_name][u"cmp-data"]
807         if data_c:
808             if table[u"include-tests"] == u"MRR":
809                 data_c_mean = data_c[0][0]
810                 data_c_stdev = data_c[0][1]
811             else:
812                 data_c_mean = mean(data_c)
813                 data_c_stdev = stdev(data_c)
814             item.append(round(data_c_mean / 1e6, 1))
815             item.append(round(data_c_stdev / 1e6, 1))
816         else:
817             data_c_mean = None
818             data_c_stdev = None
819             item.extend([None, None])
820         if data_r_mean is not None and data_c_mean is not None:
821             delta, d_stdev = relative_change_stdev(
822                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
823             try:
824                 item.append(round(delta))
825             except ValueError:
826                 item.append(delta)
827             try:
828                 item.append(round(d_stdev))
829             except ValueError:
830                 item.append(d_stdev)
831             tbl_lst.append(item)
832
833     # Sort the table according to the relative change
834     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
835
836     # Generate csv tables:
837     csv_file_name = f"{table[u'output-file']}.csv"
838     with open(csv_file_name, u"wt") as file_handler:
839         file_handler.write(header_str)
840         for test in tbl_lst:
841             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
842
843     convert_csv_to_pretty_txt(
844         csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
845     )
846     with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
847         file_handler.write(legend)
848
849     # Generate html table:
850     _tpc_generate_html_table(
851         header,
852         tbl_lst,
853         table[u'output-file'],
854         legend=legend,
855         title=table.get(u"title", u"")
856     )
857
858
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    Build a trending dashboard CSV (and pretty txt) file: one row per test
    with the last trend value, short- and long-term relative changes and
    the number of regressions / progressions in the trend window.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data to the table; per test an ordered mapping
    # build id -> measured rate, so builds stay in time order.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to talk about a trend.
        if len(data_t) < 2:
            continue

        try:
            classification_lst, avgs, _ = classify_anomalies(data_t)
        except ValueError as err:
            # NOTE(review): this aborts the whole table, not just this
            # test; kept as-is to preserve the original behavior.
            logging.info(f"{err} Skipping")
            return

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # Empty sequence: no valid samples in the long-term window.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last average vs the average at the start of
        # the short window, in percent.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        # Long-term change: last average vs the long-window maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            # Skip tests with incomplete data. This single check also
            # covers the former redundant "both changes are NaN" test,
            # which was fully subsumed by it.
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

    # Stable multi-key sort: name, then long-term, then short-term change.
    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[2])

    # Bucket by number of regressions (descending), then by number of
    # progressions (descending), keeping the sort order within buckets.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
992
993
994 def _generate_url(testbed, test_name):
995     """Generate URL to a trending plot from the name of the test case.
996
997     :param testbed: The testbed used for testing.
998     :param test_name: The name of the test case.
999     :type testbed: str
1000     :type test_name: str
1001     :returns: The URL to the plot with the trending data for the given test
1002         case.
1003     :rtype str
1004     """
1005
1006     if u"x520" in test_name:
1007         nic = u"x520"
1008     elif u"x710" in test_name:
1009         nic = u"x710"
1010     elif u"xl710" in test_name:
1011         nic = u"xl710"
1012     elif u"xxv710" in test_name:
1013         nic = u"xxv710"
1014     elif u"vic1227" in test_name:
1015         nic = u"vic1227"
1016     elif u"vic1385" in test_name:
1017         nic = u"vic1385"
1018     elif u"x553" in test_name:
1019         nic = u"x553"
1020     elif u"cx556" in test_name or u"cx556a" in test_name:
1021         nic = u"cx556a"
1022     else:
1023         nic = u""
1024
1025     if u"64b" in test_name:
1026         frame_size = u"64b"
1027     elif u"78b" in test_name:
1028         frame_size = u"78b"
1029     elif u"imix" in test_name:
1030         frame_size = u"imix"
1031     elif u"9000b" in test_name:
1032         frame_size = u"9000b"
1033     elif u"1518b" in test_name:
1034         frame_size = u"1518b"
1035     elif u"114b" in test_name:
1036         frame_size = u"114b"
1037     else:
1038         frame_size = u""
1039
1040     if u"1t1c" in test_name or \
1041         (u"-1c-" in test_name and
1042          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1043         cores = u"1t1c"
1044     elif u"2t2c" in test_name or \
1045          (u"-2c-" in test_name and
1046           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1047         cores = u"2t2c"
1048     elif u"4t4c" in test_name or \
1049          (u"-4c-" in test_name and
1050           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1051         cores = u"4t4c"
1052     elif u"2t1c" in test_name or \
1053          (u"-1c-" in test_name and
1054           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1055         cores = u"2t1c"
1056     elif u"4t2c" in test_name or \
1057          (u"-2c-" in test_name and
1058           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1059         cores = u"4t2c"
1060     elif u"8t4c" in test_name or \
1061          (u"-4c-" in test_name and
1062           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1063         cores = u"8t4c"
1064     else:
1065         cores = u""
1066
1067     if u"testpmd" in test_name:
1068         driver = u"testpmd"
1069     elif u"l3fwd" in test_name:
1070         driver = u"l3fwd"
1071     elif u"avf" in test_name:
1072         driver = u"avf"
1073     elif u"rdma" in test_name:
1074         driver = u"rdma"
1075     elif u"dnv" in testbed or u"tsh" in testbed:
1076         driver = u"ixgbe"
1077     else:
1078         driver = u"dpdk"
1079
1080     if u"macip-iacl1s" in test_name:
1081         bsf = u"features-macip-iacl1"
1082     elif u"macip-iacl10s" in test_name:
1083         bsf = u"features-macip-iacl10"
1084     elif u"macip-iacl50s" in test_name:
1085         bsf = u"features-macip-iacl50"
1086     elif u"iacl1s" in test_name:
1087         bsf = u"features-iacl1"
1088     elif u"iacl10s" in test_name:
1089         bsf = u"features-iacl10"
1090     elif u"iacl50s" in test_name:
1091         bsf = u"features-iacl50"
1092     elif u"oacl1s" in test_name:
1093         bsf = u"features-oacl1"
1094     elif u"oacl10s" in test_name:
1095         bsf = u"features-oacl10"
1096     elif u"oacl50s" in test_name:
1097         bsf = u"features-oacl50"
1098     elif u"nat44det" in test_name:
1099         bsf = u"nat44det-bidir"
1100     elif u"nat44ed" in test_name and u"udir" in test_name:
1101         bsf = u"nat44ed-udir"
1102     elif u"-cps" in test_name and u"ethip4udp" in test_name:
1103         bsf = u"udp-cps"
1104     elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1105         bsf = u"tcp-cps"
1106     elif u"-pps" in test_name and u"ethip4udp" in test_name:
1107         bsf = u"udp-pps"
1108     elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1109         bsf = u"tcp-pps"
1110     elif u"-tput" in test_name and u"ethip4udp" in test_name:
1111         bsf = u"udp-tput"
1112     elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1113         bsf = u"tcp-tput"
1114     elif u"udpsrcscale" in test_name:
1115         bsf = u"features-udp"
1116     elif u"iacl" in test_name:
1117         bsf = u"features"
1118     elif u"policer" in test_name:
1119         bsf = u"features"
1120     elif u"adl" in test_name:
1121         bsf = u"features"
1122     elif u"cop" in test_name:
1123         bsf = u"features"
1124     elif u"nat" in test_name:
1125         bsf = u"features"
1126     elif u"macip" in test_name:
1127         bsf = u"features"
1128     elif u"scale" in test_name:
1129         bsf = u"scale"
1130     elif u"base" in test_name:
1131         bsf = u"base"
1132     else:
1133         bsf = u"base"
1134
1135     if u"114b" in test_name and u"vhost" in test_name:
1136         domain = u"vts"
1137     elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1138         domain = u"nat44"
1139         if u"nat44det" in test_name:
1140             domain += u"-det-bidir"
1141         else:
1142             domain += u"-ed"
1143         if u"udir" in test_name:
1144             domain += u"-unidir"
1145         elif u"-ethip4udp-" in test_name:
1146             domain += u"-udp"
1147         elif u"-ethip4tcp-" in test_name:
1148             domain += u"-tcp"
1149         if u"-cps" in test_name:
1150             domain += u"-cps"
1151         elif u"-pps" in test_name:
1152             domain += u"-pps"
1153         elif u"-tput" in test_name:
1154             domain += u"-tput"
1155     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1156         domain = u"dpdk"
1157     elif u"memif" in test_name:
1158         domain = u"container_memif"
1159     elif u"srv6" in test_name:
1160         domain = u"srv6"
1161     elif u"vhost" in test_name:
1162         domain = u"vhost"
1163         if u"vppl2xc" in test_name:
1164             driver += u"-vpp"
1165         else:
1166             driver += u"-testpmd"
1167         if u"lbvpplacp" in test_name:
1168             bsf += u"-link-bonding"
1169     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1170         domain = u"nf_service_density_vnfc"
1171     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1172         domain = u"nf_service_density_cnfc"
1173     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1174         domain = u"nf_service_density_cnfp"
1175     elif u"ipsec" in test_name:
1176         domain = u"ipsec"
1177         if u"sw" in test_name:
1178             bsf += u"-sw"
1179         elif u"hw" in test_name:
1180             bsf += u"-hw"
1181     elif u"ethip4vxlan" in test_name:
1182         domain = u"ip4_tunnels"
1183     elif u"ethip4udpgeneve" in test_name:
1184         domain = u"ip4_tunnels"
1185     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1186         domain = u"ip4"
1187     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1188         domain = u"ip6"
1189     elif u"l2xcbase" in test_name or \
1190             u"l2xcscale" in test_name or \
1191             u"l2bdbasemaclrn" in test_name or \
1192             u"l2bdscale" in test_name or \
1193             u"l2patch" in test_name:
1194         domain = u"l2"
1195     else:
1196         domain = u""
1197
1198     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1199     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1200
1201     return file_name + anchor_name
1202
1203
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Read the CSV dashboard and render it as an HTML table wrapped in an
    rST ``.. raw:: html`` directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # NDR/PDR links go to a different directory and carry a suffix.
    if test_type == u"MRR":
        lnk_dir = u"../trending/"
        lnk_sufix = u""
    else:
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = f"-{test_type.lower()}"

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except FileNotFoundError as err:
        logging.warning(f"{err}")
        return
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Root <table> element:
    root = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    header_row = ET.SubElement(root, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, caption in enumerate(rows[0]):
        cell = ET.SubElement(
            header_row,
            u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        cell.text = caption

    # Data rows; the background colour alternates per row and encodes
    # the verdict derived from the regression/progression counters.
    palette = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for row_nr, row in enumerate(rows[1:]):
        if int(row[4]):
            verdict = u"regression"
        elif int(row[5]):
            verdict = u"progression"
        else:
            verdict = u"normal"
        html_row = ET.SubElement(
            root, u"tr", attrib=dict(bgcolor=palette[verdict][row_nr % 2])
        )

        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                html_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            # First column is the test name; optionally link it to the
            # trending plot.
            if col_nr == 0 and table.get(u"add-links", True):
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), value)}"
                        f"{lnk_sufix}"
                    )
                )
                link.text = value
            else:
                cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(ET.tostring(root, encoding=u"unicode"))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1323
1324
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    Write a plain-text summary per build: build id, version, number of
    passed/failed tests, duration and one line per failed test with its
    (IP-anonymized) error message.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    lines = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                metadata = input_data.metadata(job, build)
                version = metadata.get(u"version", u"")
                duration = metadata.get(u"elapsedtime", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            lines.append(build)
            lines.append(version)
            failed_info = list()
            nr_passed = 0
            nr_failed = 0
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    nr_passed += 1
                    continue
                nr_failed += 1
                nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                if not nic_match:
                    continue
                # Anonymize IPv4 addresses in the message and drop the
                # teardown part of the failure text.
                msg = tst_data[u'msg'].replace(u"\n", u"")
                msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
                             'xxx.xxx.xxx.xxx', msg)
                msg = msg.split(u'Also teardown failed')[0]
                failed_info.append(
                    f"{nic_match.group(0)}-{tst_data[u'name']}###{msg}"
                )
            lines.append(nr_passed)
            lines.append(nr_failed)
            lines.append(duration)
            lines.extend(failed_info)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        for line in lines:
            file_handler.write(f"{line}\n")
1392
1393
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Produce a CSV (and pretty txt) table of tests which failed within the
    configured time window, sorted by the number of failures and the date
    of the last failure.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # The CSIT job name in the last column depends on the test type.
    test_type = u"MRR"
    if u"NDRPDR" in table.get(u"filter", list()):
        test_type = u"NDRPDR"

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the last "window" days are considered.
    # NOTE(review): naive UTC now is compared with a naive parsed
    # timestamp; both are treated as UTC.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Tests without a recognizable NIC in the parent suite
                    # name are skipped.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    # "generated" is a "%Y%m%d %H:%M" timestamp taken from
                    # the build metadata.
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        # Keep (status, date, vpp version, build id) per
                        # build inside the window.
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    # Count failures per test; remember the most recent failure details.
    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append([
                tst_data[u"name"],
                fails_nr,
                fails_last_date,
                fails_last_vpp,
                f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
                f"-build-{fails_last_csit}"
            ])

    # Newest failures first within each failure-count bucket; buckets are
    # ordered by failure count, descending.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1504
1505
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Read the CSV produced by table_failed_tests and render it as an HTML
    table wrapped in an rST ``.. raw:: html`` directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # Non-MRR links go to the NDRPDR trending pages with a "-pdr" anchor.
    if test_type == u"MRR":
        lnk_dir = u"../trending/"
        lnk_sufix = u""
    else:
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = u"-pdr"

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Root <table> element:
    root = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    header_row = ET.SubElement(root, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, caption in enumerate(rows[0]):
        cell = ET.SubElement(
            header_row,
            u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        cell.text = caption

    # Data rows with alternating background colours:
    palette = (u"#e9f1fb", u"#d4e4f7")
    for row_nr, row in enumerate(rows[1:]):
        html_row = ET.SubElement(
            root, u"tr", attrib=dict(bgcolor=palette[row_nr % 2])
        )

        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                html_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            # First column is the test name; optionally link it to the
            # trending plot.
            if col_nr == 0 and table.get(u"add-links", True):
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), value)}"
                        f"{lnk_sufix}"
                    )
                )
                link.text = value
            else:
                cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(ET.tostring(root, encoding=u"unicode"))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1603
1604
def table_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_comparison
    specified in the specification file.

    The table presents mean values and standard deviations of the selected
    result for each data column defined in the specification, plus one
    "Diff" column per defined comparison. Optional root cause analysis
    (RCA) references read from yaml files are attached to the diff columns.
    The table is written as csv (full precision and formatted), pretty txt
    and html.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    columns = table.get(u"columns", None)
    if not columns:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    # Collect the data for each column defined in the specification.
    cols = list()
    for idx, col in enumerate(columns):
        if col.get(u"data-set", None) is None:
            logging.warning(f"No data for column {col.get(u'title', u'')}")
            continue
        tag = col.get(u"tag", None)  # Optional tag filter for the tests.
        data = input_data.filter_data(
            table,
            params=[
                u"throughput",
                u"result",
                u"latency",
                u"name",
                u"parent",
                u"tags"
            ],
            data=col[u"data-set"],
            continue_on_error=True
        )
        col_data = {
            u"title": col.get(u"title", f"Column{idx}"),
            u"data": dict()
        }
        for builds in data.values:
            for build in builds:
                for tst_name, tst_data in build.items():
                    if tag and tag not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = \
                        _tpc_modify_test_name(tst_name, ignore_nic=True).\
                        replace(u"2n1l-", u"")
                    if col_data[u"data"].get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        col_data[u"data"][tst_name_mod] = {
                            u"name": name,
                            # "replace" marks data that may be fully replaced
                            # by the "data-replacement" set processed below.
                            u"replace": True,
                            u"data": list(),
                            u"mean": None,
                            u"stdev": None
                        }
                    _tpc_insert_data(
                        target=col_data[u"data"][tst_name_mod],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

        # Optionally replace the collected data with an alternative data set.
        replacement = col.get(u"data-replacement", None)
        if replacement:
            rpl_data = input_data.filter_data(
                table,
                params=[
                    u"throughput",
                    u"result",
                    u"latency",
                    u"name",
                    u"parent",
                    u"tags"
                ],
                data=replacement,
                continue_on_error=True
            )
            for builds in rpl_data.values:
                for build in builds:
                    for tst_name, tst_data in build.items():
                        if tag and tag not in tst_data[u"tags"]:
                            continue
                        tst_name_mod = \
                            _tpc_modify_test_name(tst_name, ignore_nic=True).\
                            replace(u"2n1l-", u"")
                        if col_data[u"data"].get(tst_name_mod, None) is None:
                            name = tst_data[u'name'].rsplit(u'-', 1)[0]
                            if u"across testbeds" in table[u"title"].lower() \
                                    or u"across topologies" in \
                                    table[u"title"].lower():
                                name = _tpc_modify_displayed_test_name(name)
                            col_data[u"data"][tst_name_mod] = {
                                u"name": name,
                                u"replace": False,
                                u"data": list(),
                                u"mean": None,
                                u"stdev": None
                            }
                        if col_data[u"data"][tst_name_mod][u"replace"]:
                            # First replacement hit: drop the original data.
                            col_data[u"data"][tst_name_mod][u"replace"] = False
                            col_data[u"data"][tst_name_mod][u"data"] = list()
                        _tpc_insert_data(
                            target=col_data[u"data"][tst_name_mod],
                            src=tst_data,
                            include_tests=table[u"include-tests"]
                        )

        if table[u"include-tests"] in (u"NDR", u"PDR") or \
                u"latency" in table[u"include-tests"]:
            for tst_name, tst_data in col_data[u"data"].items():
                if tst_data[u"data"]:
                    tst_data[u"mean"] = mean(tst_data[u"data"])
                    tst_data[u"stdev"] = stdev(tst_data[u"data"])

        cols.append(col_data)

    # Merge the per-column data into one dictionary keyed by test name.
    tbl_dict = dict()
    for col in cols:
        for tst_name, tst_data in col[u"data"].items():
            if tbl_dict.get(tst_name, None) is None:
                tbl_dict[tst_name] = {
                    "name": tst_data[u"name"]
                }
            tbl_dict[tst_name][col[u"title"]] = {
                u"mean": tst_data[u"mean"],
                u"stdev": tst_data[u"stdev"]
            }

    if not tbl_dict:
        logging.warning(f"No data for table {table.get(u'title', u'')}!")
        return

    # One row per test: [name, col1-dict, col2-dict, ...].
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        row = [tst_data[u"name"], ]
        for col in cols:
            row.append(tst_data.get(col[u"title"], None))
        tbl_lst.append(row)

    # Validate the comparison definitions and read the optional RCA files.
    comparisons = table.get(u"comparisons", None)
    rcas = list()
    if comparisons and isinstance(comparisons, list):
        # Removing invalid items with pop() while enumerating the same list
        # would skip the element following each removed one, so collect the
        # valid comparisons into a new list instead.
        valid_comparisons = list()
        for idx, comp in enumerate(comparisons):
            try:
                col_ref = int(comp[u"reference"])
                col_cmp = int(comp[u"compare"])
            except KeyError:
                logging.warning(u"Comparison: No references defined! Skipping.")
                continue
            # NOTE(review): operator precedence makes this
            # "(ref in range and cmp in range) or ref == cmp", so a
            # self-comparison passes validation even when out of range --
            # confirm this is intended.
            if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
                    col_ref == col_cmp):
                logging.warning(f"Wrong values of reference={col_ref} "
                                f"and/or compare={col_cmp}. Skipping.")
                continue
            valid_comparisons.append(comp)
            rca_file_name = comp.get(u"rca-file", None)
            if rca_file_name:
                try:
                    with open(rca_file_name, u"r") as file_handler:
                        rcas.append(
                            {
                                u"title": f"RCA{idx + 1}",
                                u"data": load(file_handler, Loader=FullLoader)
                            }
                        )
                except (YAMLError, IOError) as err:
                    logging.warning(
                        f"The RCA file {rca_file_name} does not exist or "
                        f"it is corrupted!"
                    )
                    logging.debug(repr(err))
                    rcas.append(None)
            else:
                rcas.append(None)
        comparisons = valid_comparisons
    else:
        # No (valid) comparisons: use an empty list so the loops below
        # iterate zero times instead of failing on None.
        comparisons = list()

    # Compute the relative differences. Only rows with complete data for all
    # comparisons are kept: the for-else appends the row only when no break
    # fired inside the inner loop.
    tbl_cmp_lst = list()
    if comparisons:
        for row in tbl_lst:
            new_row = deepcopy(row)
            for comp in comparisons:
                ref_itm = row[int(comp[u"reference"])]
                if ref_itm is None and \
                        comp.get(u"reference-alt", None) is not None:
                    ref_itm = row[int(comp[u"reference-alt"])]
                cmp_itm = row[int(comp[u"compare"])]
                if ref_itm is not None and cmp_itm is not None and \
                        ref_itm[u"mean"] is not None and \
                        cmp_itm[u"mean"] is not None and \
                        ref_itm[u"stdev"] is not None and \
                        cmp_itm[u"stdev"] is not None:
                    try:
                        delta, d_stdev = relative_change_stdev(
                            ref_itm[u"mean"], cmp_itm[u"mean"],
                            ref_itm[u"stdev"], cmp_itm[u"stdev"]
                        )
                    except ZeroDivisionError:
                        break
                    if delta is None or math.isnan(delta):
                        break
                    # Pre-scaled by 1e6 so the uniform "/ 1e6" formatting
                    # applied to every column below leaves the relative
                    # values unchanged.
                    new_row.append({
                        u"mean": delta * 1e6,
                        u"stdev": d_stdev * 1e6
                    })
                else:
                    break
            else:
                tbl_cmp_lst.append(new_row)

    if not tbl_cmp_lst:
        # Without at least one complete row the formatting below would fail
        # (tbl_cmp_lst[0] is used to size the columns).
        logging.warning(f"No data for table {table.get(u'title', u'')}!")
        return

    # Sort by test name, then by the mean of the last comparison (descending).
    try:
        tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
        tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
    except TypeError as err:
        logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")

    # Full precision rows for the "-csv.csv" output; missing cells are "NT"
    # (not tested).
    tbl_for_csv = list()
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for itm in line[1:]:
            if itm is None or not isinstance(itm, dict) or\
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                row.append(u"NT")
                row.append(u"NT")
            else:
                row.append(round(float(itm[u'mean']) / 1e6, 3))
                row.append(round(float(itm[u'stdev']) / 1e6, 3))
        for rca in rcas:
            if rca is None:
                continue
            rca_nr = rca[u"data"].get(row[0], u"-")
            row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        tbl_for_csv.append(row)

    header_csv = [u"Test Case", ]
    for col in cols:
        header_csv.append(f"Avg({col[u'title']})")
        header_csv.append(f"Stdev({col[u'title']})")
    for comp in comparisons:
        header_csv.append(
            f"Avg({comp.get(u'title', u'')})"
        )
        header_csv.append(
            f"Stdev({comp.get(u'title', u'')})"
        )
    for rca in rcas:
        if rca:
            header_csv.append(rca[u"title"])

    legend_lst = table.get(u"legend", None)
    if legend_lst is None:
        legend = u""
    else:
        legend = u"\n" + u"\n".join(legend_lst) + u"\n"

    footnote = u""
    if rcas and any(rcas):
        footnote += u"\nRoot Cause Analysis:\n"
        for rca in rcas:
            if rca:
                footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"

    csv_file_name = f"{table[u'output-file']}-csv.csv"
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(
            u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
        )
        for test in tbl_for_csv:
            file_handler.write(
                u",".join([f'"{item}"' for item in test]) + u"\n"
            )
        if legend_lst:
            for item in legend_lst:
                file_handler.write(f'"{item}"\n')
        if footnote:
            for itm in footnote.split(u"\n"):
                file_handler.write(f'"{itm}"\n')

    # Format "mean \u00B1stdev" strings and record, per column, the widest
    # "\u00B1stdev" part for the right-alignment performed below.
    tbl_tmp = list()
    max_lens = [0, ] * len(tbl_cmp_lst[0])
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or \
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                new_itm = u"NT"
            else:
                if idx < len(cols):
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 1)} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
                        replace(u"nan", u"NaN")
                    )
                else:
                    # Diff columns get an explicit sign.
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 1):+} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
                        replace(u"nan", u"NaN")
                    )
            if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
                max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
            row.append(new_itm)

        tbl_tmp.append(row)

    header = [u"Test Case", ]
    header.extend([col[u"title"] for col in cols])
    header.extend([comp.get(u"title", u"") for comp in comparisons])

    # Align the "\u00B1stdev" parts and attach RCA references to the diff
    # columns.
    tbl_final = list()
    for line in tbl_tmp:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm in (u"NT", u"NaN"):
                row.append(itm)
                continue
            itm_lst = itm.rsplit(u"\u00B1", 1)
            itm_lst[-1] = \
                f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
            itm_str = u"\u00B1".join(itm_lst)

            if idx >= len(cols):
                # Diffs
                rca = rcas[idx - len(cols)]
                if rca:
                    # Add rcas to diffs
                    rca_nr = rca[u"data"].get(row[0], None)
                    if rca_nr:
                        # Pad to at least the header width (min. 19 chars)
                        # so the "[n]" marker lines up left of the value.
                        hdr_len = len(header[idx + 1]) - 1
                        if hdr_len < 19:
                            hdr_len = 19
                        rca_nr = f"[{rca_nr}]"
                        itm_str = (
                            f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
                            f"{u' ' * (hdr_len - 4 - len(itm_str))}"
                            f"{itm_str}"
                        )
            row.append(itm_str)
        tbl_final.append(row)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(u";".join(header) + u"\n")
        for test in tbl_final:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    # Generate txt table:
    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")

    with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
        file_handler.write(legend)
        file_handler.write(footnote)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_final,
        table[u'output-file'],
        legend=legend,
        footnote=footnote,
        sort_data=False,
        title=table.get(u"title", u"")
    )
1986
1987
def table_weekly_comparison(table, in_data):
    """Generate the table(s) with algorithm: table_weekly_comparison
    specified in the specification file.

    The table lists the throughput lower bound (NDR or PDR, as requested by
    ``include-tests``) for up to ``nr-of-data-columns`` builds, and appends
    one relative-change column per comparison defined in the specification.
    The result is written as csv, pretty txt and html.

    :param table: Table to generate.
    :param in_data: Data to process.
    :type table: pandas.Series
    :type in_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    # Only NDR / PDR throughput is supported by this algorithm.
    incl_tests = table.get(u"include-tests", None)
    if incl_tests not in (u"NDR", u"PDR"):
        logging.error(f"Wrong tests to include specified ({incl_tests}).")
        return

    # At least two data columns are needed to compute any comparison.
    nr_cols = table.get(u"nr-of-data-columns", None)
    if not nr_cols or nr_cols < 2:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    data = in_data.filter_data(
        table,
        params=[u"throughput", u"result", u"name", u"parent", u"tags"],
        continue_on_error=True
    )

    # Four header rows; one entry per build is inserted at position 1
    # (right after the row label) as builds are processed, so the build
    # processed first ends up in the right-most data column.
    header = [
        [u"VPP Version", ],
        [u"Start Timestamp", ],
        [u"CSIT Build", ],
        [u"CSIT Testbed", ]
    ]
    tbl_dict = dict()
    # Number of data columns (non-empty builds) collected so far.
    idx = 0
    # Optional testbed IP -> human readable name mapping from the spec.
    tb_tbl = table.get(u"testbeds", None)
    for job_name, job_data in data.items():
        for build_nr, build in job_data.items():
            # Stop once the requested number of columns is filled.
            if idx >= nr_cols:
                break
            if build.empty:
                continue

            # Translate the testbed IP to a name if a mapping is provided.
            tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            else:
                testbed = u""
            header[2].insert(1, build_nr)
            header[3].insert(1, testbed)
            header[1].insert(
                1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
            )
            header[0].insert(
                1, in_data.metadata(job_name, build_nr).get(u"version", u"")
            )

            for tst_name, tst_data in build.items():
                tst_name_mod = \
                    _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
                if not tbl_dict.get(tst_name_mod, None):
                    tbl_dict[tst_name_mod] = dict(
                        name=tst_data[u'name'].rsplit(u'-', 1)[0],
                    )
                # Builds are keyed -1, -2, ... in processing order; the
                # insert(1, ...) used when building tbl_lst below restores
                # the same left-to-right order as the header rows.
                try:
                    tbl_dict[tst_name_mod][-idx - 1] = \
                        tst_data[u"throughput"][incl_tests][u"LOWER"]
                except (TypeError, IndexError, KeyError, ValueError):
                    # Result missing or malformed for this test; the cell
                    # stays empty and is rendered as "-".
                    pass
            idx += 1

    if idx < nr_cols:
        logging.error(u"Not enough data to build the table! Skipping")
        return

    # Compute the relative changes for each comparison defined in the spec.
    cmp_dict = dict()
    for idx, cmp in enumerate(table.get(u"comparisons", list())):
        idx_ref = cmp.get(u"reference", None)
        idx_cmp = cmp.get(u"compare", None)
        if idx_ref is None or idx_cmp is None:
            continue
        # NOTE(review): idx_ref / idx_cmp are presumably negative build
        # indices; "- idx" appears to compensate for the Diff() entries
        # already appended to header[0] by previous iterations, and
        # split(u'~')[-1] for a '~' in the version string -- confirm
        # against the specification files.
        header[0].append(
            f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
            f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
        )
        header[1].append(u"")
        header[2].append(u"")
        header[3].append(u"")
        for tst_name, tst_data in tbl_dict.items():
            if not cmp_dict.get(tst_name, None):
                cmp_dict[tst_name] = list()
            ref_data = tst_data.get(idx_ref, None)
            cmp_data = tst_data.get(idx_cmp, None)
            if ref_data is None or cmp_data is None:
                # Missing data in either build -> NaN (rendered as "-").
                cmp_dict[tst_name].append(float(u'nan'))
            else:
                cmp_dict[tst_name].append(
                    relative_change(ref_data, cmp_data)
                )

    # Build table rows; rows without a valid last comparison value are
    # collected separately and appended at the end of the table.
    tbl_lst_none = list()
    tbl_lst = list()
    for tst_name, tst_data in tbl_dict.items():
        itm_lst = [tst_data[u"name"], ]
        for idx in range(nr_cols):
            item = tst_data.get(-idx - 1, None)
            if item is None:
                itm_lst.insert(1, None)
            else:
                # Scale to millions (presumably pps -> Mpps).
                itm_lst.insert(1, round(item / 1e6, 1))
        itm_lst.extend(
            [
                None if itm is None else round(itm, 1)
                for itm in cmp_dict[tst_name]
            ]
        )
        if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
            tbl_lst_none.append(itm_lst)
        else:
            tbl_lst.append(itm_lst)

    # Sort by test name first, then (stable sort) by the last comparison.
    tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_lst.extend(tbl_lst_none)

    # Generate csv table:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        for hdr in header:
            file_handler.write(u",".join(hdr) + u"\n")
        for test in tbl_lst:
            # Empty cells (None / nan / null) are rendered as "-".
            file_handler.write(u",".join(
                [
                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
                    replace(u"null", u"-") for item in test
                ]
            ) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")

    # Reorganize header in txt table
    # Move line index 2 of the pretty txt after line index 4; the exact
    # rows affected depend on convert_csv_to_pretty_txt's output layout.
    txt_table = list()
    with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
        for line in list(file_handler):
            txt_table.append(line)
    try:
        txt_table.insert(5, txt_table.pop(2))
        with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
            file_handler.writelines(txt_table)
    except IndexError:
        # Fewer lines than expected; keep the file as generated.
        pass

    # Generate html table:
    # Transpose the four header rows into one multi-line cell per column.
    hdr_html = [
        u"<br>".join(row) for row in zip(*header)
    ]
    _tpc_generate_html_table(
        hdr_html,
        tbl_lst,
        table[u'output-file'],
        sort_data=True,
        title=table.get(u"title", u""),
        generate_rst=False
    )