Report: Add Latency comparison tables
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
27 from json import loads
28
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
31 import pandas as pd
32
33 from numpy import nan, isnan
34 from yaml import load, FullLoader, YAMLError
35
36 from pal_utils import mean, stdev, classify_anomalies, \
37     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
38
39
40 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
41
42
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Iterates over the table specifications and dispatches each one to the
    function implementing its algorithm. A failure of one table does not
    abort the generation of the remaining ones.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name from the specification -> implementation.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            if table[u"algorithm"] == u"table_weekly_comparison":
                # Weekly comparison additionally needs the testbed mapping
                # from the environment part of the specification.
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            generator[table[u"algorithm"]](table, data)
        except (NameError, KeyError) as err:
            # An unknown algorithm raises KeyError (dict subscript lookup),
            # not NameError, so catch both; otherwise a single bad table
            # specification would abort the whole generation run.
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
77
78
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    One ``.rst`` file per suite is written, each embedding raw-HTML tables
    with the per-DUT, per-thread runtime counters taken from the
    ``telemetry-show-run`` field of every test in the suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"telemetry-show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optionally sort tests by index; direction comes from the specification.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Header color plus two alternating shades for zebra-striped rows.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row: the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # Missing telemetry, or telemetry stored as a plain string, means
        # there is no parsed show-run data; emit a "No Data" stub table.
        if tst_data.get(u"telemetry-show-run", None) is None or \
                isinstance(tst_data[u"telemetry-show-run"], str):
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            # Invisible (white) dot serving only as a vertical spacer.
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One sub-table per DUT present in the telemetry data.
        for dut_data in tst_data[u"telemetry-show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"runtime", None) is None:
                tcol.text = u"No Data"
                continue

            # Re-group the flat metric list into
            # runtime[thread_id][graph_node][metric_name] = value.
            runtime = dict()
            for item in dut_data[u"runtime"].get(u"data", tuple()):
                tid = int(item[u"labels"][u"thread_id"])
                if runtime.get(tid, None) is None:
                    runtime[tid] = dict()
                gnode = item[u"labels"][u"graph_node"]
                if runtime[tid].get(gnode, None) is None:
                    runtime[tid][gnode] = dict()
                try:
                    runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
                except ValueError:
                    # Non-numeric metric values are kept verbatim.
                    runtime[tid][gnode][item[u"name"]] = item[u"value"]

            # NOTE(review): assumes thread ids are contiguous starting at 0;
            # a gap in thread_id labels would raise KeyError below - confirm.
            threads = dict({idx: list() for idx in range(len(runtime))})
            for idx, run_data in runtime.items():
                for gnode, gdata in run_data.items():
                    # Clocks per unit of work: prefer vectors, then calls,
                    # then suspends as the divisor; 0.0 if the node was idle.
                    if gdata[u"vectors"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"vectors"]
                    elif gdata[u"calls"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"calls"]
                    elif gdata[u"suspends"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"suspends"]
                    else:
                        clocks = 0.0
                    if gdata[u"calls"] > 0:
                        vectors_call = gdata[u"vectors"] / gdata[u"calls"]
                    else:
                        vectors_call = 0.0
                    # Skip graph nodes that did no work at all.
                    if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
                            int(gdata[u"suspends"]):
                        threads[idx].append([
                            gnode,
                            int(gdata[u"calls"]),
                            int(gdata[u"vectors"]),
                            int(gdata[u"suspends"]),
                            clocks,
                            vectors_call
                        ])

            # DUT identification header (host / socket).
            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            # One section per thread: label row, column headers, data rows.
            for thread_nr, thread in threads.items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    # First column left-aligned, all others right-aligned.
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Trailing invisible spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Concatenate the HTML tables of all tests belonging to each suite and
    # write one rst file per suite; suites with no matching tests are skipped.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            # u"output-file" missing from the table specification.
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
321
322
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    For every suite, a CSV file is written containing the configured
    columns of all PASSed tests belonging to that suite. Cell values are
    decorated with rst substitution markers (|br|, |prein|, |preout|)
    consumed later by the report build.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optionally sort tests by index; direction comes from the specification.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; double embedded quotes (CSV escaping).
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            # Only PASSed tests that belong to this suite are included.
            if data[test][u"status"] != u"PASS" or \
                    data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column[u"data"] is "<source> <field>"; the second token
                    # is the field name read from the test data.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Break long test names into two lines at the middle
                        # dash-separated token.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u"\n", 1)[1]
                            except IndexError:
                                # Single-line message: nothing to strip.
                                pass
                        col_data = col_data.replace(u'\n', u' |br| ').\
                            replace(u'\r', u'').replace(u'"', u"'")
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
                        col_data = col_data.replace(u'\n', u' |br| ')
                        # Drop the trailing " |br| " left after the last line.
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # Keep only complete rows; a skipped column drops the whole row.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
419
420
421 def _tpc_modify_test_name(test_name, ignore_nic=False):
422     """Modify a test name by replacing its parts.
423
424     :param test_name: Test name to be modified.
425     :param ignore_nic: If True, NIC is removed from TC name.
426     :type test_name: str
427     :type ignore_nic: bool
428     :returns: Modified test name.
429     :rtype: str
430     """
431     test_name_mod = test_name.\
432         replace(u"-ndrpdr", u"").\
433         replace(u"1t1c", u"1c").\
434         replace(u"2t1c", u"1c"). \
435         replace(u"2t2c", u"2c").\
436         replace(u"4t2c", u"2c"). \
437         replace(u"4t4c", u"4c").\
438         replace(u"8t4c", u"4c")
439
440     if ignore_nic:
441         return re.sub(REGEX_NIC, u"", test_name_mod)
442     return test_name_mod
443
444
445 def _tpc_modify_displayed_test_name(test_name):
446     """Modify a test name which is displayed in a table by replacing its parts.
447
448     :param test_name: Test name to be modified.
449     :type test_name: str
450     :returns: Modified test name.
451     :rtype: str
452     """
453     return test_name.\
454         replace(u"1t1c", u"1c").\
455         replace(u"2t1c", u"1c"). \
456         replace(u"2t2c", u"2c").\
457         replace(u"4t2c", u"2c"). \
458         replace(u"4t4c", u"4c").\
459         replace(u"8t4c", u"4c")
460
461
462 def _tpc_insert_data(target, src, include_tests):
463     """Insert src data to the target structure.
464
465     :param target: Target structure where the data is placed.
466     :param src: Source data to be placed into the target structure.
467     :param include_tests: Which results will be included (MRR, NDR, PDR).
468     :type target: list
469     :type src: dict
470     :type include_tests: str
471     """
472     try:
473         if include_tests == u"MRR":
474             target[u"mean"] = src[u"result"][u"receive-rate"]
475             target[u"stdev"] = src[u"result"][u"receive-stdev"]
476         elif include_tests == u"PDR":
477             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
478         elif include_tests == u"NDR":
479             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
480         elif u"latency" in include_tests:
481             keys = include_tests.split(u"-")
482             if len(keys) == 4:
483                 lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
484                 target[u"data"].append(
485                     float(u"nan") if lat == -1 else lat * 1e6
486                 )
487     except (KeyError, TypeError):
488         pass
489
490
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    # The position of the "Test Case" column selects the alignment/width
    # preset below; presets are provided for positions 0..2.
    # NOTE(review): a "Test Case" index >= 3 would overrun these tuples -
    # presumably never happens with current specs; confirm.
    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        # One pre-sorted DataFrame per column and direction; the dropdown
        # below toggles visibility between these traces. The "Test Case"
        # column is used as the secondary sort key.
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    # Zebra striping for the table body.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        # One go.Table trace per sorted variant; only one is visible at a
        # time, controlled by the dropdown menu.
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        # NOTE: idx is re-bound here; its original "Test Case" value is no
        # longer needed in this branch.
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    # Default selection: last menu item.
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        # Single, unsorted table trace.
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    # Write the wrapping rst file that embeds the generated HTML via iframe.
    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            # rst section underline, same length as the title.
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )

        # Legend and footnote are rendered as bullet lists: first line is
        # kept as-is, remaining lines become list items.
        if legend:
            try:
                itm_lst = legend[1:-2].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
        if footnote:
            try:
                itm_lst = footnote[1:].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")
677
678
679 def table_soak_vs_ndr(table, input_data):
680     """Generate the table(s) with algorithm: table_soak_vs_ndr
681     specified in the specification file.
682
683     :param table: Table to generate.
684     :param input_data: Data to process.
685     :type table: pandas.Series
686     :type input_data: InputData
687     """
688
689     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
690
691     # Transform the data
692     logging.info(
693         f"    Creating the data set for the {table.get(u'type', u'')} "
694         f"{table.get(u'title', u'')}."
695     )
696     data = input_data.filter_data(table, continue_on_error=True)
697
698     # Prepare the header of the table
699     try:
700         header = [
701             u"Test Case",
702             f"Avg({table[u'reference'][u'title']})",
703             f"Stdev({table[u'reference'][u'title']})",
704             f"Avg({table[u'compare'][u'title']})",
705             f"Stdev{table[u'compare'][u'title']})",
706             u"Diff",
707             u"Stdev(Diff)"
708         ]
709         header_str = u";".join(header) + u"\n"
710         legend = (
711             u"\nLegend:\n"
712             f"Avg({table[u'reference'][u'title']}): "
713             f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
714             f"from a series of runs of the listed tests.\n"
715             f"Stdev({table[u'reference'][u'title']}): "
716             f"Standard deviation value of {table[u'reference'][u'title']} "
717             f"[Mpps] computed from a series of runs of the listed tests.\n"
718             f"Avg({table[u'compare'][u'title']}): "
719             f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
720             f"a series of runs of the listed tests.\n"
721             f"Stdev({table[u'compare'][u'title']}): "
722             f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
723             f"computed from a series of runs of the listed tests.\n"
724             f"Diff({table[u'reference'][u'title']},"
725             f"{table[u'compare'][u'title']}): "
726             f"Percentage change calculated for mean values.\n"
727             u"Stdev(Diff): "
728             u"Standard deviation of percentage change calculated for mean "
729             u"values."
730         )
731     except (AttributeError, KeyError) as err:
732         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
733         return
734
735     # Create a list of available SOAK test results:
736     tbl_dict = dict()
737     for job, builds in table[u"compare"][u"data"].items():
738         for build in builds:
739             for tst_name, tst_data in data[job][str(build)].items():
740                 if tst_data[u"type"] == u"SOAK":
741                     tst_name_mod = tst_name.replace(u"-soak", u"")
742                     if tbl_dict.get(tst_name_mod, None) is None:
743                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
744                         nic = groups.group(0) if groups else u""
745                         name = (
746                             f"{nic}-"
747                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
748                         )
749                         tbl_dict[tst_name_mod] = {
750                             u"name": name,
751                             u"ref-data": list(),
752                             u"cmp-data": list()
753                         }
754                     try:
755                         tbl_dict[tst_name_mod][u"cmp-data"].append(
756                             tst_data[u"throughput"][u"LOWER"])
757                     except (KeyError, TypeError):
758                         pass
759     tests_lst = tbl_dict.keys()
760
761     # Add corresponding NDR test results:
762     for job, builds in table[u"reference"][u"data"].items():
763         for build in builds:
764             for tst_name, tst_data in data[job][str(build)].items():
765                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
766                     replace(u"-mrr", u"")
767                 if tst_name_mod not in tests_lst:
768                     continue
769                 try:
770                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
771                         continue
772                     if table[u"include-tests"] == u"MRR":
773                         result = (tst_data[u"result"][u"receive-rate"],
774                                   tst_data[u"result"][u"receive-stdev"])
775                     elif table[u"include-tests"] == u"PDR":
776                         result = \
777                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
778                     elif table[u"include-tests"] == u"NDR":
779                         result = \
780                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
781                     else:
782                         result = None
783                     if result is not None:
784                         tbl_dict[tst_name_mod][u"ref-data"].append(
785                             result)
786                 except (KeyError, TypeError):
787                     continue
788
789     tbl_lst = list()
790     for tst_name in tbl_dict:
791         item = [tbl_dict[tst_name][u"name"], ]
792         data_r = tbl_dict[tst_name][u"ref-data"]
793         if data_r:
794             if table[u"include-tests"] == u"MRR":
795                 data_r_mean = data_r[0][0]
796                 data_r_stdev = data_r[0][1]
797             else:
798                 data_r_mean = mean(data_r)
799                 data_r_stdev = stdev(data_r)
800             item.append(round(data_r_mean / 1e6, 1))
801             item.append(round(data_r_stdev / 1e6, 1))
802         else:
803             data_r_mean = None
804             data_r_stdev = None
805             item.extend([None, None])
806         data_c = tbl_dict[tst_name][u"cmp-data"]
807         if data_c:
808             if table[u"include-tests"] == u"MRR":
809                 data_c_mean = data_c[0][0]
810                 data_c_stdev = data_c[0][1]
811             else:
812                 data_c_mean = mean(data_c)
813                 data_c_stdev = stdev(data_c)
814             item.append(round(data_c_mean / 1e6, 1))
815             item.append(round(data_c_stdev / 1e6, 1))
816         else:
817             data_c_mean = None
818             data_c_stdev = None
819             item.extend([None, None])
820         if data_r_mean is not None and data_c_mean is not None:
821             delta, d_stdev = relative_change_stdev(
822                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
823             try:
824                 item.append(round(delta))
825             except ValueError:
826                 item.append(delta)
827             try:
828                 item.append(round(d_stdev))
829             except ValueError:
830                 item.append(d_stdev)
831             tbl_lst.append(item)
832
833     # Sort the table according to the relative change
834     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
835
836     # Generate csv tables:
837     csv_file_name = f"{table[u'output-file']}.csv"
838     with open(csv_file_name, u"wt") as file_handler:
839         file_handler.write(header_str)
840         for test in tbl_lst:
841             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
842
843     convert_csv_to_pretty_txt(
844         csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
845     )
846     with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
847         file_handler.write(legend)
848
849     # Generate html table:
850     _tpc_generate_html_table(
851         header,
852         tbl_lst,
853         table[u'output-file'],
854         legend=legend,
855         title=table.get(u"title", u"")
856     )
857
858
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    For each test, per-build measurements are collected, anomalies are
    classified, and short-term / long-term relative changes of the trend
    are computed. The result is written as a csv file and its pretty-txt
    rendering.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to compute a trend.
        if len(data_t) < 2:
            continue

        try:
            classification_lst, avgs, _ = classify_anomalies(data_t)
        except ValueError as err:
            # Skip only this test; a single bad data set must not abort
            # the generation of the whole table.
            logging.info(f"{err} Skipping")
            continue

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        # Maximum of the long-term trend, excluding the short-term window:
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last average vs the average one window ago.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        # Long-term change: last average vs the long-term maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

    # Pre-sort by name, then by long-term and short-term change:
    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[2])

    # Bucket order: the most regressions first; within each regression
    # count, the most progressions first.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
992
993
994 def _generate_url(testbed, test_name):
995     """Generate URL to a trending plot from the name of the test case.
996
997     :param testbed: The testbed used for testing.
998     :param test_name: The name of the test case.
999     :type testbed: str
1000     :type test_name: str
1001     :returns: The URL to the plot with the trending data for the given test
1002         case.
1003     :rtype str
1004     """
1005
1006     if u"x520" in test_name:
1007         nic = u"x520"
1008     elif u"x710" in test_name:
1009         nic = u"x710"
1010     elif u"xl710" in test_name:
1011         nic = u"xl710"
1012     elif u"xxv710" in test_name:
1013         nic = u"xxv710"
1014     elif u"vic1227" in test_name:
1015         nic = u"vic1227"
1016     elif u"vic1385" in test_name:
1017         nic = u"vic1385"
1018     elif u"x553" in test_name:
1019         nic = u"x553"
1020     elif u"cx556" in test_name or u"cx556a" in test_name:
1021         nic = u"cx556a"
1022     else:
1023         nic = u""
1024
1025     if u"64b" in test_name:
1026         frame_size = u"64b"
1027     elif u"78b" in test_name:
1028         frame_size = u"78b"
1029     elif u"imix" in test_name:
1030         frame_size = u"imix"
1031     elif u"9000b" in test_name:
1032         frame_size = u"9000b"
1033     elif u"1518b" in test_name:
1034         frame_size = u"1518b"
1035     elif u"114b" in test_name:
1036         frame_size = u"114b"
1037     else:
1038         frame_size = u""
1039
1040     if u"1t1c" in test_name or \
1041         (u"-1c-" in test_name and
1042          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1043         cores = u"1t1c"
1044     elif u"2t2c" in test_name or \
1045          (u"-2c-" in test_name and
1046           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1047         cores = u"2t2c"
1048     elif u"4t4c" in test_name or \
1049          (u"-4c-" in test_name and
1050           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1051         cores = u"4t4c"
1052     elif u"2t1c" in test_name or \
1053          (u"-1c-" in test_name and
1054           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1055         cores = u"2t1c"
1056     elif u"4t2c" in test_name or \
1057          (u"-2c-" in test_name and
1058           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1059         cores = u"4t2c"
1060     elif u"8t4c" in test_name or \
1061          (u"-4c-" in test_name and
1062           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1063         cores = u"8t4c"
1064     else:
1065         cores = u""
1066
1067     if u"testpmd" in test_name:
1068         driver = u"testpmd"
1069     elif u"l3fwd" in test_name:
1070         driver = u"l3fwd"
1071     elif u"avf" in test_name:
1072         driver = u"avf"
1073     elif u"rdma" in test_name:
1074         driver = u"rdma"
1075     elif u"dnv" in testbed or u"tsh" in testbed:
1076         driver = u"ixgbe"
1077     else:
1078         driver = u"dpdk"
1079
1080     if u"macip-iacl1s" in test_name:
1081         bsf = u"features-macip-iacl1"
1082     elif u"macip-iacl10s" in test_name:
1083         bsf = u"features-macip-iacl10"
1084     elif u"macip-iacl50s" in test_name:
1085         bsf = u"features-macip-iacl50"
1086     elif u"iacl1s" in test_name:
1087         bsf = u"features-iacl1"
1088     elif u"iacl10s" in test_name:
1089         bsf = u"features-iacl10"
1090     elif u"iacl50s" in test_name:
1091         bsf = u"features-iacl50"
1092     elif u"oacl1s" in test_name:
1093         bsf = u"features-oacl1"
1094     elif u"oacl10s" in test_name:
1095         bsf = u"features-oacl10"
1096     elif u"oacl50s" in test_name:
1097         bsf = u"features-oacl50"
1098     elif u"nat44det" in test_name:
1099         bsf = u"nat44det-bidir"
1100     elif u"nat44ed" in test_name and u"udir" in test_name:
1101         bsf = u"nat44ed-udir"
1102     elif u"-cps" in test_name and u"ethip4udp" in test_name:
1103         bsf = u"udp-cps"
1104     elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1105         bsf = u"tcp-cps"
1106     elif u"-pps" in test_name and u"ethip4udp" in test_name:
1107         bsf = u"udp-pps"
1108     elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1109         bsf = u"tcp-pps"
1110     elif u"-tput" in test_name and u"ethip4udp" in test_name:
1111         bsf = u"udp-tput"
1112     elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1113         bsf = u"tcp-tput"
1114     elif u"udpsrcscale" in test_name:
1115         bsf = u"features-udp"
1116     elif u"iacl" in test_name:
1117         bsf = u"features"
1118     elif u"policer" in test_name:
1119         bsf = u"features"
1120     elif u"adl" in test_name:
1121         bsf = u"features"
1122     elif u"cop" in test_name:
1123         bsf = u"features"
1124     elif u"nat" in test_name:
1125         bsf = u"features"
1126     elif u"macip" in test_name:
1127         bsf = u"features"
1128     elif u"scale" in test_name:
1129         bsf = u"scale"
1130     elif u"base" in test_name:
1131         bsf = u"base"
1132     else:
1133         bsf = u"base"
1134
1135     if u"114b" in test_name and u"vhost" in test_name:
1136         domain = u"vts"
1137     elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1138         domain = u"nat44"
1139         if u"nat44det" in test_name:
1140             domain += u"-det-bidir"
1141         else:
1142             domain += u"-ed"
1143         if u"udir" in test_name:
1144             domain += u"-unidir"
1145         elif u"-ethip4udp-" in test_name:
1146             domain += u"-udp"
1147         elif u"-ethip4tcp-" in test_name:
1148             domain += u"-tcp"
1149         if u"-cps" in test_name:
1150             domain += u"-cps"
1151         elif u"-pps" in test_name:
1152             domain += u"-pps"
1153         elif u"-tput" in test_name:
1154             domain += u"-tput"
1155     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1156         domain = u"dpdk"
1157     elif u"memif" in test_name:
1158         domain = u"container_memif"
1159     elif u"srv6" in test_name:
1160         domain = u"srv6"
1161     elif u"vhost" in test_name:
1162         domain = u"vhost"
1163         if u"vppl2xc" in test_name:
1164             driver += u"-vpp"
1165         else:
1166             driver += u"-testpmd"
1167         if u"lbvpplacp" in test_name:
1168             bsf += u"-link-bonding"
1169     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1170         domain = u"nf_service_density_vnfc"
1171     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1172         domain = u"nf_service_density_cnfc"
1173     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1174         domain = u"nf_service_density_cnfp"
1175     elif u"ipsec" in test_name:
1176         domain = u"ipsec"
1177         if u"sw" in test_name:
1178             bsf += u"-sw"
1179         elif u"hw" in test_name:
1180             bsf += u"-hw"
1181     elif u"ethip4vxlan" in test_name:
1182         domain = u"ip4_tunnels"
1183     elif u"ethip4udpgeneve" in test_name:
1184         domain = u"ip4_tunnels"
1185     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1186         domain = u"ip4"
1187     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1188         domain = u"ip6"
1189     elif u"l2xcbase" in test_name or \
1190             u"l2xcscale" in test_name or \
1191             u"l2bdbasemaclrn" in test_name or \
1192             u"l2bdscale" in test_name or \
1193             u"l2patch" in test_name:
1194         domain = u"l2"
1195     else:
1196         domain = u""
1197
1198     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1199     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1200
1201     return file_name + anchor_name
1202
1203
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Read the csv file produced by table_perf_trending_dash and render it
    as an html table with rows colored by regression/progression status
    and, optionally, links to the trending plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # NDR/PDR dashboards link to a different directory and carry a suffix
    # selecting the rate in the plot:
    if test_type in (u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = f"-{test_type.lower()}"
    else:
        lnk_dir = u"../trending/"
        lnk_sufix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except FileNotFoundError as err:
        logging.warning(f"{err}")
        return
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # An empty csv file has no header row; processing it below would fail.
    if not csv_lst:
        logging.warning(f"The file {table[u'input-file']} is empty.")
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: two alternating shades per status color.
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 hold the regression / progression counters.
        if int(row[4]):
            color = u"regression"
        elif int(row[5]):
            color = u"progression"
        else:
            color = u"normal"
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name: the first column optionally links to the trending plot.
            if c_idx == 0 and table.get(u"add-links", True):
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), item)}"
                        f"{lnk_sufix}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1323
1324
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For every specified build, write the build number, the tested version,
    the pass/fail counters, the duration and the names of the failed
    tests, one item per line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                metadata = input_data.metadata(job, build)
                version = metadata.get(u"version", u"")
                duration = metadata.get(u"elapsedtime", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            passed = 0
            failed = 0
            failed_tests = list()
            for tst_data in data[job][build].values:
                if tst_data[u"status"] == u"FAIL":
                    failed += 1
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if groups:
                        failed_tests.append(
                            f"{groups.group(0)}-{tst_data[u'name']}"
                        )
                else:
                    passed += 1
            # Per-build section: build id, version, counters, duration and
            # then the list of failed test names.
            tbl_list.extend((build, version, passed, failed, duration))
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(f"{item}\n" for item in tbl_list)
1388
1389
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Counts, per test case, the number of failures within the configured
    time window and records the date, VPP build and CSIT job build of the
    most recent failure. The output is a csv file and its pretty-txt
    rendering.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # test_type is only used to compose the CSIT job name in the last
    # column of the table.
    test_type = u"MRR"
    if u"NDRPDR" in table.get(u"filter", list()):
        test_type = u"NDRPDR"

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the last `window` days (default 7) are
    # taken into account.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Tests whose parent suite does not name a NIC are
                    # skipped entirely.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    # `generated` is the build timestamp in the
                    # u"%Y%m%d %H:%M" format (see dt.strptime below).
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        # (status, date, vpp version, csit build)
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    # Count the failures per test; builds are visited in insertion order,
    # so fails_last_* end up describing the newest recorded failure.
    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append([
                tst_data[u"name"],
                fails_nr,
                fails_last_date,
                fails_last_vpp,
                f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
                f"-build-{fails_last_csit}"
            ])

    # Sort by date of the last failure (newest first), then bucket by the
    # failure count so the most failing tests come first.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1500
1501
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Read the csv file produced by table_failed_tests and render it as an
    html table with alternating row colors and, optionally, links to the
    trending plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    if test_type in (u"NDRPDR", u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = u"-pdr"
    else:
        lnk_dir = u"../trending/"
        lnk_sufix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except FileNotFoundError as err:
        # Consistent with table_perf_trending_dash_html: a missing input
        # file is logged, not raised.
        logging.warning(f"{err}")
        return
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # An empty csv file has no header row; processing it below would fail.
    if not csv_lst:
        logging.warning(f"The file {table[u'input-file']} is empty.")
        return

    # Table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: two alternating background shades.
    colors = (u"#e9f1fb", u"#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        trow = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=background)
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name: the first column optionally links to the trending plot.
            if c_idx == 0 and table.get(u"add-links", True):
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), item)}"
                        f"{lnk_sufix}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1599
1600
def table_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_comparison
    specified in the specification file.

    The table compares mean values and standard deviations of the selected
    tests across the specified data sets (columns), optionally adds relative
    differences (comparisons) and RCA annotations, and writes the result as
    csv, txt and html files.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    columns = table.get(u"columns", None)
    if not columns:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    # Gather the data for each column. A column is defined by its data set
    # and an optional tag filter; a "data-replacement" data set, if present,
    # overrides the samples collected from the primary data set.
    cols = list()
    for idx, col in enumerate(columns):
        if col.get(u"data-set", None) is None:
            logging.warning(f"No data for column {col.get(u'title', u'')}")
            continue
        tag = col.get(u"tag", None)
        data = input_data.filter_data(
            table,
            params=[
                u"throughput",
                u"result",
                u"latency",
                u"name",
                u"parent",
                u"tags"
            ],
            data=col[u"data-set"],
            continue_on_error=True
        )
        col_data = {
            u"title": col.get(u"title", f"Column{idx}"),
            u"data": dict()
        }
        for builds in data.values:
            for build in builds:
                for tst_name, tst_data in build.items():
                    if tag and tag not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = \
                        _tpc_modify_test_name(tst_name, ignore_nic=True).\
                        replace(u"2n1l-", u"")
                    if col_data[u"data"].get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        col_data[u"data"][tst_name_mod] = {
                            u"name": name,
                            u"replace": True,
                            u"data": list(),
                            u"mean": None,
                            u"stdev": None
                        }
                    _tpc_insert_data(
                        target=col_data[u"data"][tst_name_mod],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

        replacement = col.get(u"data-replacement", None)
        if replacement:
            rpl_data = input_data.filter_data(
                table,
                params=[u"throughput", u"result", u"name", u"parent", u"tags"],
                data=replacement,
                continue_on_error=True
            )
            for builds in rpl_data.values:
                for build in builds:
                    for tst_name, tst_data in build.items():
                        if tag and tag not in tst_data[u"tags"]:
                            continue
                        tst_name_mod = \
                            _tpc_modify_test_name(tst_name, ignore_nic=True).\
                            replace(u"2n1l-", u"")
                        if col_data[u"data"].get(tst_name_mod, None) is None:
                            name = tst_data[u'name'].rsplit(u'-', 1)[0]
                            if u"across testbeds" in table[u"title"].lower() \
                                    or u"across topologies" in \
                                    table[u"title"].lower():
                                name = _tpc_modify_displayed_test_name(name)
                            col_data[u"data"][tst_name_mod] = {
                                u"name": name,
                                u"replace": False,
                                u"data": list(),
                                u"mean": None,
                                u"stdev": None
                            }
                        # The first replacement sample for a test discards the
                        # data collected from the primary data set.
                        if col_data[u"data"][tst_name_mod][u"replace"]:
                            col_data[u"data"][tst_name_mod][u"replace"] = False
                            col_data[u"data"][tst_name_mod][u"data"] = list()
                        _tpc_insert_data(
                            target=col_data[u"data"][tst_name_mod],
                            src=tst_data,
                            include_tests=table[u"include-tests"]
                        )

        if table[u"include-tests"] in (u"NDR", u"PDR") or \
                u"latency" in table[u"include-tests"]:
            for tst_name, tst_data in col_data[u"data"].items():
                if tst_data[u"data"]:
                    tst_data[u"mean"] = mean(tst_data[u"data"])
                    tst_data[u"stdev"] = stdev(tst_data[u"data"])

        cols.append(col_data)

    # Re-group the data per test name:
    # {test name: {"name": ..., column title: {"mean", "stdev"}, ...}}
    tbl_dict = dict()
    for col in cols:
        for tst_name, tst_data in col[u"data"].items():
            if tbl_dict.get(tst_name, None) is None:
                tbl_dict[tst_name] = {
                    "name": tst_data[u"name"]
                }
            tbl_dict[tst_name][col[u"title"]] = {
                u"mean": tst_data[u"mean"],
                u"stdev": tst_data[u"stdev"]
            }

    if not tbl_dict:
        logging.warning(f"No data for table {table.get(u'title', u'')}!")
        return

    tbl_lst = list()
    for tst_data in tbl_dict.values():
        row = [tst_data[u"name"], ]
        for col in cols:
            row.append(tst_data.get(col[u"title"], None))
        tbl_lst.append(row)

    # Validate the comparisons and load their RCA files.
    # FIX: the previous implementation popped invalid items from
    # 'comparisons' while iterating over it, which skipped the validation of
    # the element following each removed one and de-synchronised 'rcas' from
    # 'comparisons'. A new list of valid comparisons is built instead.
    comparisons = table.get(u"comparisons", None)
    rcas = list()
    if comparisons and isinstance(comparisons, list):
        valid_comparisons = list()
        for idx, comp in enumerate(comparisons):
            try:
                col_ref = int(comp[u"reference"])
                col_cmp = int(comp[u"compare"])
            except KeyError:
                logging.warning(u"Comparison: No references defined! Skipping.")
                continue
            # NOTE(review): due to operator precedence this accepts the case
            # col_ref == col_cmp even when both are out of range; kept as in
            # the original to preserve behavior - confirm intent.
            if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
                    col_ref == col_cmp):
                logging.warning(f"Wrong values of reference={col_ref} "
                                f"and/or compare={col_cmp}. Skipping.")
                continue
            valid_comparisons.append(comp)
            rca_file_name = comp.get(u"rca-file", None)
            if rca_file_name:
                try:
                    with open(rca_file_name, u"r") as file_handler:
                        rcas.append(
                            {
                                u"title": f"RCA{idx + 1}",
                                u"data": load(file_handler, Loader=FullLoader)
                            }
                        )
                except (YAMLError, IOError) as err:
                    logging.warning(
                        f"The RCA file {rca_file_name} does not exist or "
                        f"it is corrupted!"
                    )
                    logging.debug(repr(err))
                    rcas.append(None)
            else:
                rcas.append(None)
        comparisons = valid_comparisons if valid_comparisons else None
    else:
        comparisons = None

    # Compute the relative differences. A row makes it into the comparison
    # table only if all of its comparisons could be computed (for/else idiom:
    # any 'break' drops the row).
    tbl_cmp_lst = list()
    if comparisons:
        for row in tbl_lst:
            new_row = deepcopy(row)
            for comp in comparisons:
                ref_itm = row[int(comp[u"reference"])]
                if ref_itm is None and \
                        comp.get(u"reference-alt", None) is not None:
                    ref_itm = row[int(comp[u"reference-alt"])]
                cmp_itm = row[int(comp[u"compare"])]
                if ref_itm is not None and cmp_itm is not None and \
                        ref_itm[u"mean"] is not None and \
                        cmp_itm[u"mean"] is not None and \
                        ref_itm[u"stdev"] is not None and \
                        cmp_itm[u"stdev"] is not None:
                    try:
                        delta, d_stdev = relative_change_stdev(
                            ref_itm[u"mean"], cmp_itm[u"mean"],
                            ref_itm[u"stdev"], cmp_itm[u"stdev"]
                        )
                    except ZeroDivisionError:
                        break
                    if delta is None:
                        break
                    # Scaled by 1e6 here and scaled back when formatted below.
                    new_row.append({
                        u"mean": delta * 1e6,
                        u"stdev": d_stdev * 1e6
                    })
                else:
                    break
            else:
                tbl_cmp_lst.append(new_row)

    try:
        # Sort by test name first, then by the mean of the last comparison.
        tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
        tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
    except TypeError as err:
        logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")

    tbl_for_csv = list()
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or\
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                row.append(u"NT")
                row.append(u"NT")
            else:
                row.append(round(float(itm[u'mean']) / 1e6, 3))
                row.append(round(float(itm[u'stdev']) / 1e6, 3))
        for rca in rcas:
            if rca is None:
                continue
            rca_nr = rca[u"data"].get(row[0], u"-")
            row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        tbl_for_csv.append(row)

    header_csv = [u"Test Case", ]
    for col in cols:
        header_csv.append(f"Avg({col[u'title']})")
        header_csv.append(f"Stdev({col[u'title']})")
    # FIX: 'comparisons' can be None at this point; iterating it directly
    # raised TypeError.
    if comparisons:
        for comp in comparisons:
            header_csv.append(
                f"Avg({comp.get(u'title', u'')})"
            )
            header_csv.append(
                f"Stdev({comp.get(u'title', u'')})"
            )
    for rca in rcas:
        if rca:
            header_csv.append(rca[u"title"])

    legend_lst = table.get(u"legend", None)
    if legend_lst is None:
        legend = u""
    else:
        legend = u"\n" + u"\n".join(legend_lst) + u"\n"

    footnote = u""
    if rcas and any(rcas):
        footnote += u"\nRoot Cause Analysis:\n"
        for rca in rcas:
            if rca:
                footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"

    csv_file_name = f"{table[u'output-file']}-csv.csv"
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(
            u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
        )
        for test in tbl_for_csv:
            file_handler.write(
                u",".join([f'"{item}"' for item in test]) + u"\n"
            )
        if legend_lst:
            for item in legend_lst:
                file_handler.write(f'"{item}"\n')
        if footnote:
            for itm in footnote.split(u"\n"):
                file_handler.write(f'"{itm}"\n')

    # Format the values for the txt/html tables and remember the widths of
    # the stdev parts per column for right-alignment.
    tbl_tmp = list()
    # FIX: guard against an empty comparison table (e.g. no valid comparisons
    # were defined) which raised IndexError on tbl_cmp_lst[0].
    max_lens = [0, ] * len(tbl_cmp_lst[0]) if tbl_cmp_lst else list()
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or \
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                new_itm = u"NT"
            else:
                if idx < len(cols):
                    # Data column: "mean ±stdev".
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 1)} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
                        replace(u"nan", u"NaN")
                    )
                else:
                    # Comparison column: signed mean.
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 1):+} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
                        replace(u"nan", u"NaN")
                    )
            if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
                max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
            row.append(new_itm)

        tbl_tmp.append(row)

    header = [u"Test Case", ]
    header.extend([col[u"title"] for col in cols])
    # FIX: skip the extension when 'comparisons' is None (see above).
    if comparisons:
        header.extend([comp.get(u"title", u"") for comp in comparisons])

    tbl_final = list()
    for line in tbl_tmp:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm in (u"NT", u"NaN"):
                row.append(itm)
                continue
            # Right-align the stdev part to the widest one in the column.
            itm_lst = itm.rsplit(u"\u00B1", 1)
            itm_lst[-1] = \
                f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
            itm_str = u"\u00B1".join(itm_lst)

            if idx >= len(cols):
                # Diffs
                rca = rcas[idx - len(cols)]
                if rca:
                    # Add rcas to diffs
                    rca_nr = rca[u"data"].get(row[0], None)
                    if rca_nr:
                        hdr_len = len(header[idx + 1]) - 1
                        if hdr_len < 19:
                            hdr_len = 19
                        rca_nr = f"[{rca_nr}]"
                        itm_str = (
                            f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
                            f"{u' ' * (hdr_len - 4 - len(itm_str))}"
                            f"{itm_str}"
                        )
            row.append(itm_str)
        tbl_final.append(row)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(u";".join(header) + u"\n")
        for test in tbl_final:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    # Generate txt table:
    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")

    with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
        file_handler.write(legend)
        file_handler.write(footnote)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_final,
        table[u'output-file'],
        legend=legend,
        footnote=footnote,
        sort_data=False,
        title=table.get(u"title", u"")
    )
1975
1976
def table_weekly_comparison(table, in_data):
    """Generate the table(s) with algorithm: table_weekly_comparison
    specified in the specification file.

    Collects the NDR/PDR lower throughput of up to "nr-of-data-columns"
    builds, computes the relative changes defined in "comparisons" and writes
    the result as csv, txt and html files.

    :param table: Table to generate.
    :param in_data: Data to process.
    :type table: pandas.Series
    :type in_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    incl_tests = table.get(u"include-tests", None)
    if incl_tests not in (u"NDR", u"PDR"):
        logging.error(f"Wrong tests to include specified ({incl_tests}).")
        return

    nr_cols = table.get(u"nr-of-data-columns", None)
    if not nr_cols or nr_cols < 2:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    data = in_data.filter_data(
        table,
        params=[u"throughput", u"result", u"name", u"parent", u"tags"],
        continue_on_error=True
    )

    # Four header rows; data cells are insert()-ed at position 1, so each
    # newly processed build lands right after the row title.
    header = [
        [u"VPP Version", ],
        [u"Start Timestamp", ],
        [u"CSIT Build", ],
        [u"CSIT Testbed", ]
    ]
    tbl_dict = dict()
    idx = 0
    tb_tbl = table.get(u"testbeds", None)
    for job_name, job_data in data.items():
        for build_nr, build in job_data.items():
            # NOTE: 'break' only leaves the builds of the current job; the
            # remaining jobs are still scanned, but with idx >= nr_cols
            # nothing more is added.
            if idx >= nr_cols:
                break
            if build.empty:
                continue

            # Map the testbed IP from the build metadata to its display name.
            tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            else:
                testbed = u""
            header[2].insert(1, build_nr)
            header[3].insert(1, testbed)
            header[1].insert(
                1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
            )
            header[0].insert(
                1, in_data.metadata(job_name, build_nr).get(u"version", u"")
            )

            for tst_name, tst_data in build.items():
                tst_name_mod = \
                    _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
                if not tbl_dict.get(tst_name_mod, None):
                    tbl_dict[tst_name_mod] = dict(
                        name=tst_data[u'name'].rsplit(u'-', 1)[0],
                    )
                # Data columns are keyed -1, -2, ... (-1 = first processed
                # build); tests with missing/malformed throughput are
                # silently skipped (best-effort collection).
                try:
                    tbl_dict[tst_name_mod][-idx - 1] = \
                        tst_data[u"throughput"][incl_tests][u"LOWER"]
                except (TypeError, IndexError, KeyError, ValueError):
                    pass
            idx += 1

    if idx < nr_cols:
        logging.error(u"Not enough data to build the table! Skipping")
        return

    # Compute the relative changes defined in the specification.
    cmp_dict = dict()
    for idx, cmp in enumerate(table.get(u"comparisons", list())):
        idx_ref = cmp.get(u"reference", None)
        idx_cmp = cmp.get(u"compare", None)
        if idx_ref is None or idx_cmp is None:
            continue
        # The '- idx' compensates for the Diff titles appended to header[0]
        # by the previous iterations of this loop.
        header[0].append(
            f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
            f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
        )
        header[1].append(u"")
        header[2].append(u"")
        header[3].append(u"")
        for tst_name, tst_data in tbl_dict.items():
            if not cmp_dict.get(tst_name, None):
                cmp_dict[tst_name] = list()
            # NOTE(review): tst_data data columns are keyed by negative
            # indices (-1, -2, ...), so 'reference'/'compare' in the spec are
            # presumably negative as well - confirm against the spec files.
            ref_data = tst_data.get(idx_ref, None)
            cmp_data = tst_data.get(idx_cmp, None)
            if ref_data is None or cmp_data is None:
                cmp_dict[tst_name].append(float(u'nan'))
            else:
                cmp_dict[tst_name].append(
                    relative_change(ref_data, cmp_data)
                )

    # Build the rows. insert(1, ...) reverses the -1..-nr_cols key order so
    # the oldest build ends up first. Rows whose last comparison is missing
    # or NaN are collected separately and appended at the end.
    tbl_lst_none = list()
    tbl_lst = list()
    for tst_name, tst_data in tbl_dict.items():
        itm_lst = [tst_data[u"name"], ]
        for idx in range(nr_cols):
            item = tst_data.get(-idx - 1, None)
            if item is None:
                itm_lst.insert(1, None)
            else:
                # Scaled to millions (presumably Mpps) - confirm units.
                itm_lst.insert(1, round(item / 1e6, 1))
        itm_lst.extend(
            [
                None if itm is None else round(itm, 1)
                for itm in cmp_dict[tst_name]
            ]
        )
        if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
            tbl_lst_none.append(itm_lst)
        else:
            tbl_lst.append(itm_lst)

    # Sort by name, then by the last comparison value; "no data" rows last.
    tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_lst.extend(tbl_lst_none)

    # Generate csv table:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        for hdr in header:
            file_handler.write(u",".join(hdr) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join(
                [
                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
                    replace(u"null", u"-") for item in test
                ]
            ) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")

    # Reorganize header in txt table
    txt_table = list()
    with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
        for line in list(file_handler):
            txt_table.append(line)
    try:
        # Move the "CSIT Build" header line below the separator produced by
        # convert_csv_to_pretty_txt; tolerate a too-short table.
        txt_table.insert(5, txt_table.pop(2))
        with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
            file_handler.writelines(txt_table)
    except IndexError:
        pass

    # Generate html table:
    # Collapse the four header rows into one html header cell per column.
    hdr_html = [
        u"<br>".join(row) for row in zip(*header)
    ]
    _tpc_generate_html_table(
        hdr_html,
        tbl_lst,
        table[u'output-file'],
        sort_data=True,
        title=table.get(u"title", u""),
        generate_rst=False
    )