fe0eaaa22e3d62c1cfe4fb2a2199aa372ef55e1b
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
27 from json import loads
28
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
31 import pandas as pd
32
33 from numpy import nan, isnan
34 from yaml import load, FullLoader, YAMLError
35
36 from pal_utils import mean, stdev, classify_anomalies, \
37     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
38
39
# Matches a NIC token inside a test/suite name, e.g. u"10ge2p1x710":
# <speed>ge<nr-of-ports>p<port-id> followed by the NIC model tail.
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
41
42
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Dispatches each table in the specification to the generator function
    named by its u"algorithm" key. Unknown algorithms are reported and
    skipped instead of crashing the whole run.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    generator = {
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        alg = table.get(u"algorithm", None)
        # Bug fix: an algorithm missing from the dispatch dict used to raise
        # an uncaught KeyError (the original only caught NameError). Check
        # membership explicitly so inner KeyErrors are not masked either.
        if alg not in generator:
            logging.error(f"Algorithm {alg} is not defined.")
            continue
        try:
            if alg == u"table_weekly_comparison":
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            generator[alg](table, data)
        except NameError as err:
            logging.error(
                f"Probably algorithm {alg} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
77
78
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    Writes one ``.rst`` file per suite, each containing raw-HTML tables
    with telemetry ("show runtime") operational data for every test in
    that suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    # Keep only the fields needed to build the operational-data tables.
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"telemetry-show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optional sorting of tests by index, driven by the specification.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    # Suites are needed to group the generated tables into output files.
    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Background colors: header rows, spacer rows, alternating body rows.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row: the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # Missing telemetry (or a plain string message instead of structured
        # data) --> emit a stub table stating "No Data".
        if tst_data.get(u"telemetry-show-run", None) is None or \
                isinstance(tst_data[u"telemetry-show-run"], str):
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            # White-on-white dot keeps the closing row from collapsing.
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT in the telemetry data.
        for dut_data in tst_data[u"telemetry-show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"runtime", None) is None:
                tcol.text = u"No Data"
                continue

            # Re-group the flat telemetry items into
            # runtime[thread_id][graph_node][metric_name] = value.
            runtime = dict()
            for item in dut_data[u"runtime"].get(u"data", tuple()):
                tid = int(item[u"labels"][u"thread_id"])
                if runtime.get(tid, None) is None:
                    runtime[tid] = dict()
                gnode = item[u"labels"][u"graph_node"]
                if runtime[tid].get(gnode, None) is None:
                    runtime[tid][gnode] = dict()
                try:
                    runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
                except ValueError:
                    # Non-numeric metric values are kept as strings.
                    runtime[tid][gnode][item[u"name"]] = item[u"value"]

            # Compute per-node statistics and collect table rows per thread.
            threads = dict({idx: list() for idx in range(len(runtime))})
            for idx, run_data in runtime.items():
                for gnode, gdata in run_data.items():
                    # Clocks per unit of work: prefer vectors, then calls,
                    # then suspends as the divisor.
                    if gdata[u"vectors"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"vectors"]
                    elif gdata[u"calls"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"calls"]
                    elif gdata[u"suspends"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"suspends"]
                    else:
                        clocks = 0.0
                    if gdata[u"calls"] > 0:
                        vectors_call = gdata[u"vectors"] / gdata[u"calls"]
                    else:
                        vectors_call = 0.0
                    # Skip graph nodes which did no work at all.
                    if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
                            int(gdata[u"suspends"]):
                        threads[idx].append([
                            gnode,
                            int(gdata[u"calls"]),
                            int(gdata[u"vectors"]),
                            int(gdata[u"suspends"]),
                            clocks,
                            vectors_call
                        ])

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            # One sub-table per thread: "main" (thread 0) or "worker_<n>".
            for thread_nr, thread in threads.items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                # Column headers; first column left-aligned, rest right.
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    # Alternate body-row background colors.
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Closing spacer row with an invisible (white-on-white) dot.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Concatenate per-test tables and write one .rst file per suite.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            # Include only tests whose parent name occurs in the suite name
            # (substring test).
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
321
322
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Writes one CSV file per suite with the columns defined in the
    specification, for all passing tests of that suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of tests by index, driven by the specification.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables: quote each column title and double
    # any embedded double quotes (CSV escaping).
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            # Include only passing tests belonging to this suite (the parent
            # name is tested as a substring of the suite name).
            if data[test][u"status"] != u"PASS" or \
                    data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column[u"data"] is e.g. u"data name"; its second word
                    # selects which field of the test data to show.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    # |br|, |prein| and |preout| are rst substitutions for
                    # <br>, <pre> and </pre>, resolved later in rst output.
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Break long names roughly in half at a dash.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u"\n", 1)[1]
                            except IndexError:
                                pass
                        col_data = col_data.replace(u'\n', u' |br| ').\
                            replace(u'\r', u'').replace(u'"', u"'")
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
                        col_data = col_data.replace(u'\n', u' |br| ')
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # Rows with a skipped ("Test Failed") column are incomplete and
            # therefore dropped.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
419
420
421 def _tpc_modify_test_name(test_name, ignore_nic=False):
422     """Modify a test name by replacing its parts.
423
424     :param test_name: Test name to be modified.
425     :param ignore_nic: If True, NIC is removed from TC name.
426     :type test_name: str
427     :type ignore_nic: bool
428     :returns: Modified test name.
429     :rtype: str
430     """
431     test_name_mod = test_name.\
432         replace(u"-ndrpdr", u"").\
433         replace(u"1t1c", u"1c").\
434         replace(u"2t1c", u"1c"). \
435         replace(u"2t2c", u"2c").\
436         replace(u"4t2c", u"2c"). \
437         replace(u"4t4c", u"4c").\
438         replace(u"8t4c", u"4c")
439
440     if ignore_nic:
441         return re.sub(REGEX_NIC, u"", test_name_mod)
442     return test_name_mod
443
444
445 def _tpc_modify_displayed_test_name(test_name):
446     """Modify a test name which is displayed in a table by replacing its parts.
447
448     :param test_name: Test name to be modified.
449     :type test_name: str
450     :returns: Modified test name.
451     :rtype: str
452     """
453     return test_name.\
454         replace(u"1t1c", u"1c").\
455         replace(u"2t1c", u"1c"). \
456         replace(u"2t2c", u"2c").\
457         replace(u"4t2c", u"2c"). \
458         replace(u"4t4c", u"4c").\
459         replace(u"8t4c", u"4c")
460
461
462 def _tpc_insert_data(target, src, include_tests):
463     """Insert src data to the target structure.
464
465     :param target: Target structure where the data is placed.
466     :param src: Source data to be placed into the target structure.
467     :param include_tests: Which results will be included (MRR, NDR, PDR).
468     :type target: list
469     :type src: dict
470     :type include_tests: str
471     """
472     try:
473         if include_tests == u"MRR":
474             target[u"mean"] = src[u"result"][u"receive-rate"]
475             target[u"stdev"] = src[u"result"][u"receive-stdev"]
476         elif include_tests == u"PDR":
477             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
478         elif include_tests == u"NDR":
479             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
480         elif u"latency" in include_tests:
481             keys = include_tests.split(u"-")
482             if len(keys) == 4:
483                 lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
484                 target[u"data"].append(
485                     float(u"nan") if lat == -1 else lat * 1e6
486                 )
487     except (KeyError, TypeError):
488         pass
489
490
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    # Index of the u"Test Case" column; also used to select the alignment /
    # width presets below, so it is expected to be small (0..3).
    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        # One pre-sorted DataFrame per column and direction; sorting is by
        # the selected column with the u"Test Case" column as tie-breaker.
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    # Alternating row background colors.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        # One trace per pre-sorted variant ...
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        # ... selected via a dropdown that makes exactly one trace visible.
        # NOTE: this loop re-binds `idx`; the presets above were already read.
        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        # Unsorted: a single static table.
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    # Wrap the generated html in an rst file with an iframe, plus the
    # optional title, legend and footnote.
    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        # rst substitution definitions used by legend/footnote text.
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )

        if legend:
            try:
                # First line becomes the caption, the rest a bullet list.
                itm_lst = legend[1:-2].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
        if footnote:
            try:
                itm_lst = footnote[1:].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")
677
678
679 def table_soak_vs_ndr(table, input_data):
680     """Generate the table(s) with algorithm: table_soak_vs_ndr
681     specified in the specification file.
682
683     :param table: Table to generate.
684     :param input_data: Data to process.
685     :type table: pandas.Series
686     :type input_data: InputData
687     """
688
689     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
690
691     # Transform the data
692     logging.info(
693         f"    Creating the data set for the {table.get(u'type', u'')} "
694         f"{table.get(u'title', u'')}."
695     )
696     data = input_data.filter_data(table, continue_on_error=True)
697
698     # Prepare the header of the table
699     try:
700         header = [
701             u"Test Case",
702             f"Avg({table[u'reference'][u'title']})",
703             f"Stdev({table[u'reference'][u'title']})",
704             f"Avg({table[u'compare'][u'title']})",
705             f"Stdev{table[u'compare'][u'title']})",
706             u"Diff",
707             u"Stdev(Diff)"
708         ]
709         header_str = u";".join(header) + u"\n"
710         legend = (
711             u"\nLegend:\n"
712             f"Avg({table[u'reference'][u'title']}): "
713             f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
714             f"from a series of runs of the listed tests.\n"
715             f"Stdev({table[u'reference'][u'title']}): "
716             f"Standard deviation value of {table[u'reference'][u'title']} "
717             f"[Mpps] computed from a series of runs of the listed tests.\n"
718             f"Avg({table[u'compare'][u'title']}): "
719             f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
720             f"a series of runs of the listed tests.\n"
721             f"Stdev({table[u'compare'][u'title']}): "
722             f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
723             f"computed from a series of runs of the listed tests.\n"
724             f"Diff({table[u'reference'][u'title']},"
725             f"{table[u'compare'][u'title']}): "
726             f"Percentage change calculated for mean values.\n"
727             u"Stdev(Diff): "
728             u"Standard deviation of percentage change calculated for mean "
729             u"values."
730         )
731     except (AttributeError, KeyError) as err:
732         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
733         return
734
735     # Create a list of available SOAK test results:
736     tbl_dict = dict()
737     for job, builds in table[u"compare"][u"data"].items():
738         for build in builds:
739             for tst_name, tst_data in data[job][str(build)].items():
740                 if tst_data[u"type"] == u"SOAK":
741                     tst_name_mod = tst_name.replace(u"-soak", u"")
742                     if tbl_dict.get(tst_name_mod, None) is None:
743                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
744                         nic = groups.group(0) if groups else u""
745                         name = (
746                             f"{nic}-"
747                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
748                         )
749                         tbl_dict[tst_name_mod] = {
750                             u"name": name,
751                             u"ref-data": list(),
752                             u"cmp-data": list()
753                         }
754                     try:
755                         tbl_dict[tst_name_mod][u"cmp-data"].append(
756                             tst_data[u"throughput"][u"LOWER"])
757                     except (KeyError, TypeError):
758                         pass
759     tests_lst = tbl_dict.keys()
760
761     # Add corresponding NDR test results:
762     for job, builds in table[u"reference"][u"data"].items():
763         for build in builds:
764             for tst_name, tst_data in data[job][str(build)].items():
765                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
766                     replace(u"-mrr", u"")
767                 if tst_name_mod not in tests_lst:
768                     continue
769                 try:
770                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
771                         continue
772                     if table[u"include-tests"] == u"MRR":
773                         result = (tst_data[u"result"][u"receive-rate"],
774                                   tst_data[u"result"][u"receive-stdev"])
775                     elif table[u"include-tests"] == u"PDR":
776                         result = \
777                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
778                     elif table[u"include-tests"] == u"NDR":
779                         result = \
780                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
781                     else:
782                         result = None
783                     if result is not None:
784                         tbl_dict[tst_name_mod][u"ref-data"].append(
785                             result)
786                 except (KeyError, TypeError):
787                     continue
788
789     tbl_lst = list()
790     for tst_name in tbl_dict:
791         item = [tbl_dict[tst_name][u"name"], ]
792         data_r = tbl_dict[tst_name][u"ref-data"]
793         if data_r:
794             if table[u"include-tests"] == u"MRR":
795                 data_r_mean = data_r[0][0]
796                 data_r_stdev = data_r[0][1]
797             else:
798                 data_r_mean = mean(data_r)
799                 data_r_stdev = stdev(data_r)
800             item.append(round(data_r_mean / 1e6, 1))
801             item.append(round(data_r_stdev / 1e6, 1))
802         else:
803             data_r_mean = None
804             data_r_stdev = None
805             item.extend([None, None])
806         data_c = tbl_dict[tst_name][u"cmp-data"]
807         if data_c:
808             if table[u"include-tests"] == u"MRR":
809                 data_c_mean = data_c[0][0]
810                 data_c_stdev = data_c[0][1]
811             else:
812                 data_c_mean = mean(data_c)
813                 data_c_stdev = stdev(data_c)
814             item.append(round(data_c_mean / 1e6, 1))
815             item.append(round(data_c_stdev / 1e6, 1))
816         else:
817             data_c_mean = None
818             data_c_stdev = None
819             item.extend([None, None])
820         if data_r_mean is not None and data_c_mean is not None:
821             delta, d_stdev = relative_change_stdev(
822                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
823             try:
824                 item.append(round(delta))
825             except ValueError:
826                 item.append(delta)
827             try:
828                 item.append(round(d_stdev))
829             except ValueError:
830                 item.append(d_stdev)
831             tbl_lst.append(item)
832
833     # Sort the table according to the relative change
834     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
835
836     # Generate csv tables:
837     csv_file_name = f"{table[u'output-file']}.csv"
838     with open(csv_file_name, u"wt") as file_handler:
839         file_handler.write(header_str)
840         for test in tbl_lst:
841             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
842
843     convert_csv_to_pretty_txt(
844         csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
845     )
846     with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
847         file_handler.write(legend)
848
849     # Generate html table:
850     _tpc_generate_html_table(
851         header,
852         tbl_lst,
853         table[u'output-file'],
854         legend=legend,
855         title=table.get(u"title", u"")
856     )
857
858
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data for the table; per test an ordered mapping
    # build --> measured rate.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to compute a trend.
        if len(data_t) < 2:
            continue

        try:
            classification_lst, avgs, _ = classify_anomalies(data_t)
        except ValueError as err:
            # Skip only this test; the remaining tests can still be
            # classified and the table generated.
            logging.info(f"{err} Skipping")
            continue

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        # The best trend value in the long window preceding the short one:
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: the last trend vs. the trend one window ago.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        # Long-term change: the last trend vs. the long-window maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            # Rows with any NaN metric are not presentable; drop them.
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

    # Sort by name, then by long-term and short-term change ...
    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[2])

    # ... and finally group by the number of regressions, then
    # progressions, highest counts first.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
992
993
994 def _generate_url(testbed, test_name):
995     """Generate URL to a trending plot from the name of the test case.
996
997     :param testbed: The testbed used for testing.
998     :param test_name: The name of the test case.
999     :type testbed: str
1000     :type test_name: str
1001     :returns: The URL to the plot with the trending data for the given test
1002         case.
1003     :rtype str
1004     """
1005
1006     if u"x520" in test_name:
1007         nic = u"x520"
1008     elif u"x710" in test_name:
1009         nic = u"x710"
1010     elif u"xl710" in test_name:
1011         nic = u"xl710"
1012     elif u"xxv710" in test_name:
1013         nic = u"xxv710"
1014     elif u"vic1227" in test_name:
1015         nic = u"vic1227"
1016     elif u"vic1385" in test_name:
1017         nic = u"vic1385"
1018     elif u"x553" in test_name:
1019         nic = u"x553"
1020     elif u"cx556" in test_name or u"cx556a" in test_name:
1021         nic = u"cx556a"
1022     else:
1023         nic = u""
1024
1025     if u"64b" in test_name:
1026         frame_size = u"64b"
1027     elif u"78b" in test_name:
1028         frame_size = u"78b"
1029     elif u"imix" in test_name:
1030         frame_size = u"imix"
1031     elif u"9000b" in test_name:
1032         frame_size = u"9000b"
1033     elif u"1518b" in test_name:
1034         frame_size = u"1518b"
1035     elif u"114b" in test_name:
1036         frame_size = u"114b"
1037     else:
1038         frame_size = u""
1039
1040     if u"1t1c" in test_name or \
1041         (u"-1c-" in test_name and
1042          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1043         cores = u"1t1c"
1044     elif u"2t2c" in test_name or \
1045          (u"-2c-" in test_name and
1046           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1047         cores = u"2t2c"
1048     elif u"4t4c" in test_name or \
1049          (u"-4c-" in test_name and
1050           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1051         cores = u"4t4c"
1052     elif u"2t1c" in test_name or \
1053          (u"-1c-" in test_name and
1054           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1055         cores = u"2t1c"
1056     elif u"4t2c" in test_name or \
1057          (u"-2c-" in test_name and
1058           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1059         cores = u"4t2c"
1060     elif u"8t4c" in test_name or \
1061          (u"-4c-" in test_name and
1062           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1063         cores = u"8t4c"
1064     else:
1065         cores = u""
1066
1067     if u"testpmd" in test_name:
1068         driver = u"testpmd"
1069     elif u"l3fwd" in test_name:
1070         driver = u"l3fwd"
1071     elif u"avf" in test_name:
1072         driver = u"avf"
1073     elif u"rdma" in test_name:
1074         driver = u"rdma"
1075     elif u"dnv" in testbed or u"tsh" in testbed:
1076         driver = u"ixgbe"
1077     else:
1078         driver = u"dpdk"
1079
1080     if u"macip-iacl1s" in test_name:
1081         bsf = u"features-macip-iacl1"
1082     elif u"macip-iacl10s" in test_name:
1083         bsf = u"features-macip-iacl10"
1084     elif u"macip-iacl50s" in test_name:
1085         bsf = u"features-macip-iacl50"
1086     elif u"iacl1s" in test_name:
1087         bsf = u"features-iacl1"
1088     elif u"iacl10s" in test_name:
1089         bsf = u"features-iacl10"
1090     elif u"iacl50s" in test_name:
1091         bsf = u"features-iacl50"
1092     elif u"oacl1s" in test_name:
1093         bsf = u"features-oacl1"
1094     elif u"oacl10s" in test_name:
1095         bsf = u"features-oacl10"
1096     elif u"oacl50s" in test_name:
1097         bsf = u"features-oacl50"
1098     elif u"nat44det" in test_name:
1099         bsf = u"nat44det-bidir"
1100     elif u"nat44ed" in test_name and u"udir" in test_name:
1101         bsf = u"nat44ed-udir"
1102     elif u"-cps" in test_name and u"ethip4udp" in test_name:
1103         bsf = u"udp-cps"
1104     elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1105         bsf = u"tcp-cps"
1106     elif u"-pps" in test_name and u"ethip4udp" in test_name:
1107         bsf = u"udp-pps"
1108     elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1109         bsf = u"tcp-pps"
1110     elif u"-tput" in test_name and u"ethip4udp" in test_name:
1111         bsf = u"udp-tput"
1112     elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1113         bsf = u"tcp-tput"
1114     elif u"udpsrcscale" in test_name:
1115         bsf = u"features-udp"
1116     elif u"iacl" in test_name:
1117         bsf = u"features"
1118     elif u"policer" in test_name:
1119         bsf = u"features"
1120     elif u"adl" in test_name:
1121         bsf = u"features"
1122     elif u"cop" in test_name:
1123         bsf = u"features"
1124     elif u"nat" in test_name:
1125         bsf = u"features"
1126     elif u"macip" in test_name:
1127         bsf = u"features"
1128     elif u"scale" in test_name:
1129         bsf = u"scale"
1130     elif u"base" in test_name:
1131         bsf = u"base"
1132     else:
1133         bsf = u"base"
1134
1135     if u"114b" in test_name and u"vhost" in test_name:
1136         domain = u"vts"
1137     elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1138         domain = u"nat44"
1139         if u"nat44det" in test_name:
1140             domain += u"-det-bidir"
1141         else:
1142             domain += u"-ed"
1143         if u"udir" in test_name:
1144             domain += u"-unidir"
1145         elif u"-ethip4udp-" in test_name:
1146             domain += u"-udp"
1147         elif u"-ethip4tcp-" in test_name:
1148             domain += u"-tcp"
1149         if u"-cps" in test_name:
1150             domain += u"-cps"
1151         elif u"-pps" in test_name:
1152             domain += u"-pps"
1153         elif u"-tput" in test_name:
1154             domain += u"-tput"
1155     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1156         domain = u"dpdk"
1157     elif u"memif" in test_name:
1158         domain = u"container_memif"
1159     elif u"srv6" in test_name:
1160         domain = u"srv6"
1161     elif u"vhost" in test_name:
1162         domain = u"vhost"
1163         if u"vppl2xc" in test_name:
1164             driver += u"-vpp"
1165         else:
1166             driver += u"-testpmd"
1167         if u"lbvpplacp" in test_name:
1168             bsf += u"-link-bonding"
1169     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1170         domain = u"nf_service_density_vnfc"
1171     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1172         domain = u"nf_service_density_cnfc"
1173     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1174         domain = u"nf_service_density_cnfp"
1175     elif u"ipsec" in test_name:
1176         domain = u"ipsec"
1177         if u"sw" in test_name:
1178             bsf += u"-sw"
1179         elif u"hw" in test_name:
1180             bsf += u"-hw"
1181     elif u"ethip4vxlan" in test_name:
1182         domain = u"ip4_tunnels"
1183     elif u"ethip4udpgeneve" in test_name:
1184         domain = u"ip4_tunnels"
1185     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1186         domain = u"ip4"
1187     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1188         domain = u"ip6"
1189     elif u"l2xcbase" in test_name or \
1190             u"l2xcscale" in test_name or \
1191             u"l2bdbasemaclrn" in test_name or \
1192             u"l2bdscale" in test_name or \
1193             u"l2patch" in test_name:
1194         domain = u"l2"
1195     else:
1196         domain = u""
1197
1198     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1199     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1200
1201     return file_name + anchor_name
1202
1203
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # MRR dashboards link to the classic trending pages, NDR / PDR
    # dashboards to the ndrpdr trending pages.
    if test_type == u"MRR":
        lnk_dir = u"../trending/"
        lnk_sufix = u""
    else:
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = f"-{test_type.lower()}"

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except FileNotFoundError as err:
        logging.warning(f"{err}")
        return
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Root element of the generated HTML table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    head_row = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, title in enumerate(rows[0]):
        head_cell = ET.SubElement(
            head_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        head_cell.text = title

    # Row background colors, alternating between even and odd rows:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }

    add_links = table.get(u"add-links", True)
    for row_nr, row in enumerate(rows[1:]):
        # Pick the color according to the detected anomalies
        # (regressions in column 4, progressions in column 5):
        if int(row[4]):
            state = u"regression"
        elif int(row[5]):
            state = u"progression"
        else:
            state = u"normal"
        body_row = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[state][row_nr % 2])
        )

        for col_nr, item in enumerate(row):
            cell = ET.SubElement(
                body_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0 and add_links:
                # The test name in the first column links to its
                # trending graph:
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), item)}"
                        f"{lnk_sufix}"
                    )
                )
                link.text = item
            else:
                cell.text = item

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1323
1324
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    # Per build: build id, version, nr of passed, nr of failed, duration,
    # followed by the names of the failed tests.
    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                metadata = input_data.metadata(job, build)
                version = metadata.get(u"version", u"")
                duration = metadata.get(u"elapsedtime", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            passed = 0
            failed = 0
            failed_tests = list()
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                if not groups:
                    continue
                failed_tests.append(f"{groups.group(0)}-{tst_data[u'name']}")
            tbl_list.append(build)
            tbl_list.append(version)
            tbl_list.append(passed)
            tbl_list.append(failed)
            tbl_list.append(duration)
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(f"{entry}\n" for entry in tbl_list)
1388
1389
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # The CSIT job name in the last column depends on the test type:
    test_type = u"NDRPDR" if u"NDRPDR" in table.get(u"filter", list()) \
        else u"MRR"

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Only results generated within the time window (in days, default 7)
    # are taken into account.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    # Per test, an ordered mapping build --> (status, generated, version,
    # build), restricted to the time window.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    tbl_dict[tst_name] = {
                        u"name": f"{groups.group(0)}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    # Count the failures per test; remember the data of the most recent
    # failure (builds are iterated in insertion order).
    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for status, generated, version, build in tst_data[u"data"].values():
            if status == u"FAIL":
                fails_nr += 1
                fails_last_date = generated
                fails_last_vpp = version
                fails_last_csit = build
        if not fails_nr:
            continue
        max_fails = max(max_fails, fails_nr)
        job_name = u"mrr-daily" if test_type == u"MRR" else u"ndrpdr-weekly"
        tbl_lst.append([
            tst_data[u"name"],
            fails_nr,
            fails_last_date,
            fails_last_vpp,
            f"{job_name}-build-{fails_last_csit}"
        ])

    # Sort by the last failure time (newest first), then group by the
    # failure count, highest first.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_sorted.extend([item for item in tbl_lst if item[1] == nrf])

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1500
1501
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    if test_type in (u"NDRPDR", u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = u"-pdr"
    else:
        lnk_dir = u"../trending/"
        lnk_sufix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except FileNotFoundError as err:
        # A missing input file is not fatal; behave like
        # table_perf_trending_dash_html and only log a warning.
        logging.warning(f"{err}")
        return
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: alternate two background colors.
    colors = (u"#e9f1fb", u"#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        trow = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=background)
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # The test name in the first column links to its trending graph:
            if c_idx == 0 and table.get(u"add-links", True):
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), item)}"
                        f"{lnk_sufix}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1599
1600
def table_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_comparison
    specified in the specification file.

    The table compares mean values (and standard deviations) of the same
    tests collected from several data sets (one per configured column).
    Optional column-to-column comparisons (relative change in %) and RCA
    (root cause analysis) notes read from yaml files can be appended.
    The result is written as csv, txt and html files.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    columns = table.get(u"columns", None)
    if not columns:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    # Collect per-column test data. Each column aggregates samples from its
    # "data-set"; an optional "data-replacement" data set overrides samples
    # for tests present in both.
    cols = list()
    for idx, col in enumerate(columns):
        if col.get(u"data-set", None) is None:
            logging.warning(f"No data for column {col.get(u'title', u'')}")
            continue
        tag = col.get(u"tag", None)
        data = input_data.filter_data(
            table,
            params=[
                u"throughput",
                u"result",
                u"latency",
                u"name",
                u"parent",
                u"tags"
            ],
            data=col[u"data-set"],
            continue_on_error=True
        )
        col_data = {
            u"title": col.get(u"title", f"Column{idx}"),
            u"data": dict()
        }
        for builds in data.values:
            for build in builds:
                for tst_name, tst_data in build.items():
                    # An optional tag restricts the column to matching tests.
                    if tag and tag not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = \
                        _tpc_modify_test_name(tst_name, ignore_nic=True).\
                        replace(u"2n1l-", u"")
                    if col_data[u"data"].get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        # "replace" marks entries whose data may be discarded
                        # if the replacement data set provides samples.
                        col_data[u"data"][tst_name_mod] = {
                            u"name": name,
                            u"replace": True,
                            u"data": list(),
                            u"mean": None,
                            u"stdev": None
                        }
                    _tpc_insert_data(
                        target=col_data[u"data"][tst_name_mod],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

        replacement = col.get(u"data-replacement", None)
        if replacement:
            rpl_data = input_data.filter_data(
                table,
                params=[
                    u"throughput",
                    u"result",
                    u"latency",
                    u"name",
                    u"parent",
                    u"tags"
                ],
                data=replacement,
                continue_on_error=True
            )
            for builds in rpl_data.values:
                for build in builds:
                    for tst_name, tst_data in build.items():
                        if tag and tag not in tst_data[u"tags"]:
                            continue
                        tst_name_mod = \
                            _tpc_modify_test_name(tst_name, ignore_nic=True).\
                            replace(u"2n1l-", u"")
                        if col_data[u"data"].get(tst_name_mod, None) is None:
                            name = tst_data[u'name'].rsplit(u'-', 1)[0]
                            if u"across testbeds" in table[u"title"].lower() \
                                    or u"across topologies" in \
                                    table[u"title"].lower():
                                name = _tpc_modify_displayed_test_name(name)
                            col_data[u"data"][tst_name_mod] = {
                                u"name": name,
                                u"replace": False,
                                u"data": list(),
                                u"mean": None,
                                u"stdev": None
                            }
                        # First replacement sample for this test: drop the
                        # data collected from the primary data set.
                        if col_data[u"data"][tst_name_mod][u"replace"]:
                            col_data[u"data"][tst_name_mod][u"replace"] = False
                            col_data[u"data"][tst_name_mod][u"data"] = list()
                        _tpc_insert_data(
                            target=col_data[u"data"][tst_name_mod],
                            src=tst_data,
                            include_tests=table[u"include-tests"]
                        )

        if table[u"include-tests"] in (u"NDR", u"PDR") or \
                u"latency" in table[u"include-tests"]:
            for tst_name, tst_data in col_data[u"data"].items():
                if tst_data[u"data"]:
                    tst_data[u"mean"] = mean(tst_data[u"data"])
                    tst_data[u"stdev"] = stdev(tst_data[u"data"])

        cols.append(col_data)

    # Pivot: test name -> {column title: {mean, stdev}}.
    tbl_dict = dict()
    for col in cols:
        for tst_name, tst_data in col[u"data"].items():
            if tbl_dict.get(tst_name, None) is None:
                tbl_dict[tst_name] = {
                    "name": tst_data[u"name"]
                }
            tbl_dict[tst_name][col[u"title"]] = {
                u"mean": tst_data[u"mean"],
                u"stdev": tst_data[u"stdev"]
            }

    if not tbl_dict:
        logging.warning(f"No data for table {table.get(u'title', u'')}!")
        return

    # One row per test: [name, col1 stats or None, col2 stats or None, ...]
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        row = [tst_data[u"name"], ]
        for col in cols:
            row.append(tst_data.get(col[u"title"], None))
        tbl_lst.append(row)

    # Validate comparisons and load associated RCA files.
    # NOTE: the previous implementation popped invalid items out of
    # `comparisons` while enumerating it, which skipped the item following
    # each removed one and misaligned the comparisons with `rcas`. Build a
    # new list of valid comparisons instead.
    comparisons = table.get(u"comparisons", None)
    rcas = list()
    if comparisons and isinstance(comparisons, list):
        valid_comparisons = list()
        for idx, comp in enumerate(comparisons):
            try:
                col_ref = int(comp[u"reference"])
                col_cmp = int(comp[u"compare"])
            except KeyError:
                logging.warning(u"Comparison: No references defined! Skipping.")
                continue
            # NOTE(review): the `or col_ref == col_cmp` term accepts equal
            # (even out-of-range) indices due to and/or precedence; kept as-is
            # to preserve the original acceptance criteria.
            if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
                    col_ref == col_cmp):
                logging.warning(f"Wrong values of reference={col_ref} "
                                f"and/or compare={col_cmp}. Skipping.")
                continue
            valid_comparisons.append(comp)
            # Keep `rcas` aligned with valid comparisons: exactly one entry
            # (dict or None) is appended per accepted comparison.
            rca_file_name = comp.get(u"rca-file", None)
            if rca_file_name:
                try:
                    with open(rca_file_name, u"r") as file_handler:
                        rcas.append(
                            {
                                u"title": f"RCA{idx + 1}",
                                u"data": load(file_handler, Loader=FullLoader)
                            }
                        )
                except (YAMLError, IOError) as err:
                    logging.warning(
                        f"The RCA file {rca_file_name} does not exist or "
                        f"it is corrupted!"
                    )
                    logging.debug(repr(err))
                    rcas.append(None)
            else:
                rcas.append(None)
        comparisons = valid_comparisons if valid_comparisons else None
    else:
        comparisons = None

    # Compute the relative changes. A row is kept only if every comparison
    # could be computed for it (the for/else drops rows where any comparison
    # broke out early).
    tbl_cmp_lst = list()
    if comparisons:
        for row in tbl_lst:
            new_row = deepcopy(row)
            for comp in comparisons:
                ref_itm = row[int(comp[u"reference"])]
                if ref_itm is None and \
                        comp.get(u"reference-alt", None) is not None:
                    ref_itm = row[int(comp[u"reference-alt"])]
                cmp_itm = row[int(comp[u"compare"])]
                if ref_itm is not None and cmp_itm is not None and \
                        ref_itm[u"mean"] is not None and \
                        cmp_itm[u"mean"] is not None and \
                        ref_itm[u"stdev"] is not None and \
                        cmp_itm[u"stdev"] is not None:
                    try:
                        delta, d_stdev = relative_change_stdev(
                            ref_itm[u"mean"], cmp_itm[u"mean"],
                            ref_itm[u"stdev"], cmp_itm[u"stdev"]
                        )
                    except ZeroDivisionError:
                        break
                    # NaN never compares equal to itself, so the former
                    # membership test `delta in (None, float(u"nan"), ...)`
                    # could not detect NaN; use isnan() explicitly.
                    if delta is None or isnan(delta):
                        break
                    new_row.append({
                        u"mean": delta * 1e6,
                        u"stdev": d_stdev * 1e6
                    })
                else:
                    break
            else:
                tbl_cmp_lst.append(new_row)

    try:
        # Secondary key first (name, ascending), then primary key (last
        # comparison mean, descending) - stable sorts compose.
        tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
        tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
    except TypeError as err:
        logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")

    # Guard: without any computed comparison row the code below would fail
    # (`for comp in comparisons` with None, `tbl_cmp_lst[0]` on empty list).
    if not tbl_cmp_lst:
        logging.error(
            f"No comparable data for table {table.get(u'title', u'')}! "
            f"Skipping."
        )
        return

    # Detailed csv: Avg and Stdev in separate columns, plus RCA tags.
    tbl_for_csv = list()
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or\
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                row.append(u"NT")  # Not tested
                row.append(u"NT")
            else:
                row.append(round(float(itm[u'mean']) / 1e6, 3))
                row.append(round(float(itm[u'stdev']) / 1e6, 3))
        for rca in rcas:
            if rca is None:
                continue
            rca_nr = rca[u"data"].get(row[0], u"-")
            row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        tbl_for_csv.append(row)

    header_csv = [u"Test Case", ]
    for col in cols:
        header_csv.append(f"Avg({col[u'title']})")
        header_csv.append(f"Stdev({col[u'title']})")
    for comp in comparisons:
        header_csv.append(
            f"Avg({comp.get(u'title', u'')})"
        )
        header_csv.append(
            f"Stdev({comp.get(u'title', u'')})"
        )
    for rca in rcas:
        if rca:
            header_csv.append(rca[u"title"])

    legend_lst = table.get(u"legend", None)
    if legend_lst is None:
        legend = u""
    else:
        legend = u"\n" + u"\n".join(legend_lst) + u"\n"

    footnote = u""
    if rcas and any(rcas):
        footnote += u"\nRoot Cause Analysis:\n"
        for rca in rcas:
            if rca:
                footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"

    csv_file_name = f"{table[u'output-file']}-csv.csv"
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(
            u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
        )
        for test in tbl_for_csv:
            file_handler.write(
                u",".join([f'"{item}"' for item in test]) + u"\n"
            )
        if legend_lst:
            for item in legend_lst:
                file_handler.write(f'"{item}"\n')
        if footnote:
            for itm in footnote.split(u"\n"):
                file_handler.write(f'"{itm}"\n')

    # Pretty txt/html representation: "mean ±stdev" in one cell, padded so
    # the ± parts line up; track the widest stdev part per column.
    tbl_tmp = list()
    max_lens = [0, ] * len(tbl_cmp_lst[0])
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or \
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                new_itm = u"NT"
            else:
                if idx < len(cols):
                    # Data column: plain value.
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 1)} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
                        replace(u"nan", u"NaN")
                    )
                else:
                    # Comparison column: signed value.
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 1):+} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
                        replace(u"nan", u"NaN")
                    )
            if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
                max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
            row.append(new_itm)

        tbl_tmp.append(row)

    header = [u"Test Case", ]
    header.extend([col[u"title"] for col in cols])
    header.extend([comp.get(u"title", u"") for comp in comparisons])

    # Right-align the stdev part and prepend RCA tags to comparison columns.
    tbl_final = list()
    for line in tbl_tmp:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm in (u"NT", u"NaN"):
                row.append(itm)
                continue
            itm_lst = itm.rsplit(u"\u00B1", 1)
            itm_lst[-1] = \
                f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
            itm_str = u"\u00B1".join(itm_lst)

            if idx >= len(cols):
                # Diffs
                rca = rcas[idx - len(cols)]
                if rca:
                    # Add rcas to diffs
                    rca_nr = rca[u"data"].get(row[0], None)
                    if rca_nr:
                        hdr_len = len(header[idx + 1]) - 1
                        if hdr_len < 19:
                            hdr_len = 19
                        rca_nr = f"[{rca_nr}]"
                        itm_str = (
                            f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
                            f"{u' ' * (hdr_len - 4 - len(itm_str))}"
                            f"{itm_str}"
                        )
            row.append(itm_str)
        tbl_final.append(row)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(u";".join(header) + u"\n")
        for test in tbl_final:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    # Generate txt table:
    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")

    with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
        file_handler.write(legend)
        file_handler.write(footnote)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_final,
        table[u'output-file'],
        legend=legend,
        footnote=footnote,
        sort_data=False,
        title=table.get(u"title", u"")
    )
1983
def table_weekly_comparison(table, in_data):
    """Generate the table(s) with algorithm: table_weekly_comparison
    specified in the specification file.

    For up to "nr-of-data-columns" builds, collect the NDR or PDR lower
    throughput of each test, compute relative changes between configured
    pairs of columns and write the result as csv, txt and html files.

    :param table: Table to generate.
    :param in_data: Data to process.
    :type table: pandas.Series
    :type in_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    # Only NDR or PDR throughput tables are supported by this algorithm.
    incl_tests = table.get(u"include-tests", None)
    if incl_tests not in (u"NDR", u"PDR"):
        logging.error(f"Wrong tests to include specified ({incl_tests}).")
        return

    # At least two data columns are needed to compute a comparison.
    nr_cols = table.get(u"nr-of-data-columns", None)
    if not nr_cols or nr_cols < 2:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    data = in_data.filter_data(
        table,
        params=[u"throughput", u"result", u"name", u"parent", u"tags"],
        continue_on_error=True
    )

    # Four header rows; per-build values are inserted at index 1 below,
    # so later (newer) builds end up closer to the row label.
    header = [
        [u"VPP Version", ],
        [u"Start Timestamp", ],
        [u"CSIT Build", ],
        [u"CSIT Testbed", ]
    ]
    tbl_dict = dict()
    idx = 0  # Number of data columns collected so far.
    tb_tbl = table.get(u"testbeds", None)  # Optional testbed IP -> name map.
    for job_name, job_data in data.items():
        for build_nr, build in job_data.items():
            # NOTE: this breaks only the inner (builds) loop; the outer
            # (jobs) loop continues but collects nothing more due to this
            # same check.
            if idx >= nr_cols:
                break
            if build.empty:
                continue

            tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            else:
                testbed = u""
            header[2].insert(1, build_nr)
            header[3].insert(1, testbed)
            header[1].insert(
                1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
            )
            header[0].insert(
                1, in_data.metadata(job_name, build_nr).get(u"version", u"")
            )

            for tst_name, tst_data in build.items():
                tst_name_mod = \
                    _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
                if not tbl_dict.get(tst_name_mod, None):
                    tbl_dict[tst_name_mod] = dict(
                        name=tst_data[u'name'].rsplit(u'-', 1)[0],
                    )
                # Store throughput under negative keys: -1 for the first
                # collected build, -2 for the second, etc. Tests missing in
                # a build simply have no entry for that key.
                try:
                    tbl_dict[tst_name_mod][-idx - 1] = \
                        tst_data[u"throughput"][incl_tests][u"LOWER"]
                except (TypeError, IndexError, KeyError, ValueError):
                    pass
            idx += 1

    if idx < nr_cols:
        logging.error(u"Not enough data to build the table! Skipping")
        return

    # Compute relative changes for each configured comparison; one extra
    # header/data column is appended per comparison.
    cmp_dict = dict()
    for idx, cmp in enumerate(table.get(u"comparisons", list())):
        idx_ref = cmp.get(u"reference", None)
        idx_cmp = cmp.get(u"compare", None)
        if idx_ref is None or idx_cmp is None:
            continue
        # NOTE(review): `idx_ref - idx` compensates for the columns appended
        # by previous comparisons shifting header[0] - verify if more than
        # one comparison is configured.
        header[0].append(
            f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
            f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
        )
        header[1].append(u"")
        header[2].append(u"")
        header[3].append(u"")
        for tst_name, tst_data in tbl_dict.items():
            if not cmp_dict.get(tst_name, None):
                cmp_dict[tst_name] = list()
            ref_data = tst_data.get(idx_ref, None)
            cmp_data = tst_data.get(idx_cmp, None)
            if ref_data is None or cmp_data is None:
                # Missing data on either side -> NaN, rendered as "-" later.
                cmp_dict[tst_name].append(float(u'nan'))
            else:
                cmp_dict[tst_name].append(
                    relative_change(ref_data, cmp_data)
                )

    # Build table rows; rows with no valid last comparison go to the end.
    tbl_lst_none = list()
    tbl_lst = list()
    for tst_name, tst_data in tbl_dict.items():
        itm_lst = [tst_data[u"name"], ]
        for idx in range(nr_cols):
            item = tst_data.get(-idx - 1, None)
            if item is None:
                itm_lst.insert(1, None)
            else:
                # Convert pps to Mpps.
                itm_lst.insert(1, round(item / 1e6, 1))
        itm_lst.extend(
            [
                None if itm is None else round(itm, 1)
                for itm in cmp_dict[tst_name]
            ]
        )
        if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
            tbl_lst_none.append(itm_lst)
        else:
            tbl_lst.append(itm_lst)

    # Stable sorts: by name, then by the last comparison value; rows
    # without a comparison are appended at the end.
    tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_lst.extend(tbl_lst_none)

    # Generate csv table:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        for hdr in header:
            file_handler.write(u",".join(hdr) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join(
                [
                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
                    replace(u"null", u"-") for item in test
                ]
            ) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")

    # Reorganize header in txt table
    txt_table = list()
    with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
        for line in list(file_handler):
            txt_table.append(line)
    try:
        # Move the "CSIT Build" line below the separator produced by the
        # pretty-txt conversion; ignore files too short to have one.
        txt_table.insert(5, txt_table.pop(2))
        with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
            file_handler.writelines(txt_table)
    except IndexError:
        pass

    # Generate html table:
    # Transpose the four header rows into one multi-line cell per column.
    hdr_html = [
        u"<br>".join(row) for row in zip(*header)
    ]
    _tpc_generate_html_table(
        hdr_html,
        tbl_lst,
        table[u'output-file'],
        sort_data=True,
        title=table.get(u"title", u""),
        generate_rst=False
    )