351250a4d24d0554758a886ea39f92ac607a1835
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import math
21 import re
22
23 from collections import OrderedDict
24 from xml.etree import ElementTree as ET
25 from datetime import datetime as dt
26 from datetime import timedelta
27 from copy import deepcopy
28 from json import loads
29
30 import plotly.graph_objects as go
31 import plotly.offline as ploff
32 import pandas as pd
33
34 from numpy import nan, isnan
35 from yaml import load, FullLoader, YAMLError
36
37 from pal_utils import mean, stdev, classify_anomalies, \
38     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
39
40
# Matches a NIC token embedded in a test/suite name, e.g. u"10ge2p1x710"
# (link speed, port group, port index, model). Used both to strip the NIC
# from test names and to extract it for display names.
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
42
43
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Dispatches each table in the specification to its generator function
    based on the table's ``algorithm`` field. A table with an unknown or
    missing algorithm is logged and skipped; remaining tables are still
    generated.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    generator = {
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            if table[u"algorithm"] == u"table_weekly_comparison":
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            generator[table[u"algorithm"]](table, data)
        # KeyError covers an algorithm name missing from the dispatch dict
        # (or a table without an "algorithm" key); the original code caught
        # only NameError, so an unknown algorithm crashed the whole run
        # instead of being logged and skipped.
        except (NameError, KeyError) as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
78
79
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    For every suite, builds an HTML table of per-DUT, per-thread VPP
    "show runtime" telemetry (vectors, packets, suspends, clocks) for each
    test in that suite and writes it into a ``.rst`` file wrapping the raw
    HTML.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    # Only the fields needed to build the tables are pulled from the data.
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"telemetry-show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optional alphabetical ordering of tests, driven by the specification.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    # Suites are fetched separately; one output file is written per suite.
    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Header color, spacer color, and two alternating body-row colors.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row: the test name spanning all six columns.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Blank spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # Telemetry missing, or present only as a raw string (not parsed
        # per-DUT dicts): emit a "No Data" placeholder table and return.
        if tst_data.get(u"telemetry-show-run", None) is None or \
                isinstance(tst_data[u"telemetry-show-run"], str):
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            # Invisible (white, tiny) dot as a trailing spacer so the table
            # keeps its height when rendered.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT found in the telemetry.
        for dut_data in tst_data[u"telemetry-show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"runtime", None) is None:
                tcol.text = u"No Data"
                continue

            # Re-group the flat telemetry item list into
            # runtime[thread_id][graph_node][metric_name] = value,
            # converting values to float where possible.
            runtime = dict()
            for item in dut_data[u"runtime"].get(u"data", tuple()):
                tid = int(item[u"labels"][u"thread_id"])
                if runtime.get(tid, None) is None:
                    runtime[tid] = dict()
                gnode = item[u"labels"][u"graph_node"]
                if runtime[tid].get(gnode, None) is None:
                    runtime[tid][gnode] = dict()
                try:
                    runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
                except ValueError:
                    runtime[tid][gnode][item[u"name"]] = item[u"value"]

            # Per-thread rows. NOTE(review): keys are range(len(runtime)),
            # but appends below index by the actual thread id — assumes
            # thread ids are contiguous starting at 0; confirm upstream.
            threads = dict({idx: list() for idx in range(len(runtime))})
            for idx, run_data in runtime.items():
                for gnode, gdata in run_data.items():
                    # Clocks per unit of work: prefer per-vector, then
                    # per-call, then per-suspend; 0.0 if no activity.
                    if gdata[u"vectors"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"vectors"]
                    elif gdata[u"calls"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"calls"]
                    elif gdata[u"suspends"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"suspends"]
                    else:
                        clocks = 0.0
                    if gdata[u"calls"] > 0:
                        vectors_call = gdata[u"vectors"] / gdata[u"calls"]
                    else:
                        vectors_call = 0.0
                    # Only keep graph nodes that did any work at all.
                    if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
                            int(gdata[u"suspends"]):
                        threads[idx].append([
                            gnode,
                            int(gdata[u"calls"]),
                            int(gdata[u"vectors"]),
                            int(gdata[u"suspends"]),
                            clocks,
                            vectors_call
                        ])

            # DUT header: host IP and socket, then a spacer row.
            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in threads.items():
                # Thread title row: "main" for thread 0, "worker_N" else.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                # Column header row (first column left-aligned, rest right).
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # Data rows with alternating background colors; floats are
                # formatted to two decimals.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                # Spacer row after each thread section.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Trailing invisible dot as a height-preserving spacer.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            # Substring match: a test belongs to a suite if its parent name
            # occurs within the suite name.
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            # One .rst file per suite, embedding the raw HTML tables.
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
322
323
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    For every suite, writes one CSV file with one row per passed test,
    where each cell is a double-quoted, rst-marker-decorated value taken
    from the fields named in the specification's ``columns``.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional alphabetical ordering of tests, driven by the specification.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables: CSV-quoted column titles with
    # embedded double quotes doubled per CSV convention.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            # Only passed tests belonging to this suite (substring match on
            # the parent name) are included.
            if data[test][u"status"] != u"PASS" or \
                    data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column["data"] looks like "<verb> <field>"; the field
                    # name after the space selects the test-data item.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    # (skipping a column makes row_lst shorter than the
                    # header, so the whole row is dropped by the length
                    # check below).
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Long names are split roughly in half at a dash and
                        # joined with an rst line break.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                # Drop the first line of the message.
                                col_data = col_data.split(u"\n", 1)[1]
                            except IndexError:
                                pass
                        col_data = col_data.replace(u'\n', u' |br| ').\
                            replace(u'\r', u'').replace(u'"', u"'")
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
                        col_data = col_data.replace(u'\n', u' |br| ')
                        # Drops the final 5 characters — presumably trimming
                        # the trailing " |br| " marker; confirm intent.
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # A row is kept only if every column was produced.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
420
421
422 def _tpc_modify_test_name(test_name, ignore_nic=False):
423     """Modify a test name by replacing its parts.
424
425     :param test_name: Test name to be modified.
426     :param ignore_nic: If True, NIC is removed from TC name.
427     :type test_name: str
428     :type ignore_nic: bool
429     :returns: Modified test name.
430     :rtype: str
431     """
432     test_name_mod = test_name.\
433         replace(u"-ndrpdr", u"").\
434         replace(u"1t1c", u"1c").\
435         replace(u"2t1c", u"1c"). \
436         replace(u"2t2c", u"2c").\
437         replace(u"4t2c", u"2c"). \
438         replace(u"4t4c", u"4c").\
439         replace(u"8t4c", u"4c")
440
441     if ignore_nic:
442         return re.sub(REGEX_NIC, u"", test_name_mod)
443     return test_name_mod
444
445
446 def _tpc_modify_displayed_test_name(test_name):
447     """Modify a test name which is displayed in a table by replacing its parts.
448
449     :param test_name: Test name to be modified.
450     :type test_name: str
451     :returns: Modified test name.
452     :rtype: str
453     """
454     return test_name.\
455         replace(u"1t1c", u"1c").\
456         replace(u"2t1c", u"1c"). \
457         replace(u"2t2c", u"2c").\
458         replace(u"4t2c", u"2c"). \
459         replace(u"4t4c", u"4c").\
460         replace(u"8t4c", u"4c")
461
462
463 def _tpc_insert_data(target, src, include_tests):
464     """Insert src data to the target structure.
465
466     :param target: Target structure where the data is placed.
467     :param src: Source data to be placed into the target structure.
468     :param include_tests: Which results will be included (MRR, NDR, PDR).
469     :type target: list
470     :type src: dict
471     :type include_tests: str
472     """
473     try:
474         if include_tests == u"MRR":
475             target[u"mean"] = src[u"result"][u"receive-rate"]
476             target[u"stdev"] = src[u"result"][u"receive-stdev"]
477         elif include_tests == u"PDR":
478             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
479         elif include_tests == u"NDR":
480             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
481         elif u"latency" in include_tests:
482             keys = include_tests.split(u"-")
483             if len(keys) == 4:
484                 lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
485                 target[u"data"].append(
486                     float(u"nan") if lat == -1 else lat * 1e6
487                 )
488     except (KeyError, TypeError):
489         pass
490
491
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    # idx: position of the "Test Case" column; it also selects the
    # alignment/width preset below. NOTE(review): the presets only cover
    # idx 0..2 — a "Test Case" column at position >= 3 would raise
    # IndexError on params[u"width"][idx]; confirm callers' headers.
    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        # One pre-sorted DataFrame per header column (ascending), then the
        # same set descending; the "Test Case" column itself gets inverted
        # primary order. The dropdown below toggles which one is visible.
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    # Alternating row colors, one list per table.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        # One go.Table trace per pre-sorted DataFrame.
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        # Dropdown menu: each entry shows exactly one trace.
        # NOTE: this loop rebinds idx (the column-preset index above); safe
        # here only because the sorted branch no longer reads the old value.
        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    # Last menu item (last column, descending) is shown
                    # by default.
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        # Unsorted: a single trace from the original DataFrame.
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    # Standalone interactive HTML file next to the rst output.
    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    # Wrapping .rst file embedding the HTML table in an iframe.
    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )

        # Legend/footnote: first line becomes a paragraph, remaining lines
        # become an rst bullet list. The slicing ([1:-2] / [1:]) presumably
        # strips surrounding newlines added by the callers — confirm.
        if legend:
            try:
                itm_lst = legend[1:-2].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
        if footnote:
            try:
                itm_lst = footnote[1:].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")
678
679
680 def table_soak_vs_ndr(table, input_data):
681     """Generate the table(s) with algorithm: table_soak_vs_ndr
682     specified in the specification file.
683
684     :param table: Table to generate.
685     :param input_data: Data to process.
686     :type table: pandas.Series
687     :type input_data: InputData
688     """
689
690     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
691
692     # Transform the data
693     logging.info(
694         f"    Creating the data set for the {table.get(u'type', u'')} "
695         f"{table.get(u'title', u'')}."
696     )
697     data = input_data.filter_data(table, continue_on_error=True)
698
699     # Prepare the header of the table
700     try:
701         header = [
702             u"Test Case",
703             f"Avg({table[u'reference'][u'title']})",
704             f"Stdev({table[u'reference'][u'title']})",
705             f"Avg({table[u'compare'][u'title']})",
706             f"Stdev{table[u'compare'][u'title']})",
707             u"Diff",
708             u"Stdev(Diff)"
709         ]
710         header_str = u";".join(header) + u"\n"
711         legend = (
712             u"\nLegend:\n"
713             f"Avg({table[u'reference'][u'title']}): "
714             f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
715             f"from a series of runs of the listed tests.\n"
716             f"Stdev({table[u'reference'][u'title']}): "
717             f"Standard deviation value of {table[u'reference'][u'title']} "
718             f"[Mpps] computed from a series of runs of the listed tests.\n"
719             f"Avg({table[u'compare'][u'title']}): "
720             f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
721             f"a series of runs of the listed tests.\n"
722             f"Stdev({table[u'compare'][u'title']}): "
723             f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
724             f"computed from a series of runs of the listed tests.\n"
725             f"Diff({table[u'reference'][u'title']},"
726             f"{table[u'compare'][u'title']}): "
727             f"Percentage change calculated for mean values.\n"
728             u"Stdev(Diff): "
729             u"Standard deviation of percentage change calculated for mean "
730             u"values."
731         )
732     except (AttributeError, KeyError) as err:
733         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
734         return
735
736     # Create a list of available SOAK test results:
737     tbl_dict = dict()
738     for job, builds in table[u"compare"][u"data"].items():
739         for build in builds:
740             for tst_name, tst_data in data[job][str(build)].items():
741                 if tst_data[u"type"] == u"SOAK":
742                     tst_name_mod = tst_name.replace(u"-soak", u"")
743                     if tbl_dict.get(tst_name_mod, None) is None:
744                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
745                         nic = groups.group(0) if groups else u""
746                         name = (
747                             f"{nic}-"
748                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
749                         )
750                         tbl_dict[tst_name_mod] = {
751                             u"name": name,
752                             u"ref-data": list(),
753                             u"cmp-data": list()
754                         }
755                     try:
756                         tbl_dict[tst_name_mod][u"cmp-data"].append(
757                             tst_data[u"throughput"][u"LOWER"])
758                     except (KeyError, TypeError):
759                         pass
760     tests_lst = tbl_dict.keys()
761
762     # Add corresponding NDR test results:
763     for job, builds in table[u"reference"][u"data"].items():
764         for build in builds:
765             for tst_name, tst_data in data[job][str(build)].items():
766                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
767                     replace(u"-mrr", u"")
768                 if tst_name_mod not in tests_lst:
769                     continue
770                 try:
771                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
772                         continue
773                     if table[u"include-tests"] == u"MRR":
774                         result = (tst_data[u"result"][u"receive-rate"],
775                                   tst_data[u"result"][u"receive-stdev"])
776                     elif table[u"include-tests"] == u"PDR":
777                         result = \
778                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
779                     elif table[u"include-tests"] == u"NDR":
780                         result = \
781                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
782                     else:
783                         result = None
784                     if result is not None:
785                         tbl_dict[tst_name_mod][u"ref-data"].append(
786                             result)
787                 except (KeyError, TypeError):
788                     continue
789
790     tbl_lst = list()
791     for tst_name in tbl_dict:
792         item = [tbl_dict[tst_name][u"name"], ]
793         data_r = tbl_dict[tst_name][u"ref-data"]
794         if data_r:
795             if table[u"include-tests"] == u"MRR":
796                 data_r_mean = data_r[0][0]
797                 data_r_stdev = data_r[0][1]
798             else:
799                 data_r_mean = mean(data_r)
800                 data_r_stdev = stdev(data_r)
801             item.append(round(data_r_mean / 1e6, 1))
802             item.append(round(data_r_stdev / 1e6, 1))
803         else:
804             data_r_mean = None
805             data_r_stdev = None
806             item.extend([None, None])
807         data_c = tbl_dict[tst_name][u"cmp-data"]
808         if data_c:
809             if table[u"include-tests"] == u"MRR":
810                 data_c_mean = data_c[0][0]
811                 data_c_stdev = data_c[0][1]
812             else:
813                 data_c_mean = mean(data_c)
814                 data_c_stdev = stdev(data_c)
815             item.append(round(data_c_mean / 1e6, 1))
816             item.append(round(data_c_stdev / 1e6, 1))
817         else:
818             data_c_mean = None
819             data_c_stdev = None
820             item.extend([None, None])
821         if data_r_mean is not None and data_c_mean is not None:
822             delta, d_stdev = relative_change_stdev(
823                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
824             try:
825                 item.append(round(delta))
826             except ValueError:
827                 item.append(delta)
828             try:
829                 item.append(round(d_stdev))
830             except ValueError:
831                 item.append(d_stdev)
832             tbl_lst.append(item)
833
834     # Sort the table according to the relative change
835     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
836
837     # Generate csv tables:
838     csv_file_name = f"{table[u'output-file']}.csv"
839     with open(csv_file_name, u"wt") as file_handler:
840         file_handler.write(header_str)
841         for test in tbl_lst:
842             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
843
844     convert_csv_to_pretty_txt(
845         csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
846     )
847     with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
848         file_handler.write(legend)
849
850     # Generate html table:
851     _tpc_generate_html_table(
852         header,
853         tbl_lst,
854         table[u'output-file'],
855         legend=legend,
856         title=table.get(u"title", u"")
857     )
858
859
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    For every test, the collected samples are classified (regression /
    progression / normal), the short-term and long-term relative changes of
    the trend average are computed, and the result is written as a CSV file
    and converted to a pretty TXT file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Which measurement is tracked: MRR receive rate, or the NDR/PDR lower
    # throughput bound.
    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data to the table:
    # tbl_dict maps test name -> {name, data}, where data maps build number
    # to the measured sample, in insertion (chronological) order.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Tests whose parent suite has no recognizable NIC name
                    # are skipped entirely.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to evaluate a trend.
        if len(data_t) < 2:
            continue

        try:
            classification_lst, avgs, _ = classify_anomalies(data_t)
        except ValueError as err:
            # NOTE(review): this aborts the whole table generation on the
            # first failing test although the message says "Skipping";
            # ``continue`` would skip just this test - confirm the intent.
            logging.info(f"{err} Skipping")
            return

        # Short-term and long-term windows, both capped by the number of
        # available samples.
        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            # The highest trend average within the long-term window,
            # excluding the short-term window.
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # Empty or all-NaN slice.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last trend average vs the one at the start of
        # the short-term window, in percent.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        # Long-term change: last trend average vs the long-term maximum,
        # in percent.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            # Skip tests with incomplete results.
            # NOTE(review): the first check is logically subsumed by the
            # second one (if both values are NaN, the ``or`` chain below
            # is also true).
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

    # Python sorts are stable, so the last sort wins: the primary key is the
    # short-term change, then the long-term change, then the test name.
    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[2])

    # Bucket the rows by the number of regressions (descending), then by the
    # number of progressions (descending), preserving the sort above inside
    # each bucket.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
993
994
995 def _generate_url(testbed, test_name):
996     """Generate URL to a trending plot from the name of the test case.
997
998     :param testbed: The testbed used for testing.
999     :param test_name: The name of the test case.
1000     :type testbed: str
1001     :type test_name: str
1002     :returns: The URL to the plot with the trending data for the given test
1003         case.
1004     :rtype str
1005     """
1006
1007     if u"x520" in test_name:
1008         nic = u"x520"
1009     elif u"x710" in test_name:
1010         nic = u"x710"
1011     elif u"xl710" in test_name:
1012         nic = u"xl710"
1013     elif u"xxv710" in test_name:
1014         nic = u"xxv710"
1015     elif u"vic1227" in test_name:
1016         nic = u"vic1227"
1017     elif u"vic1385" in test_name:
1018         nic = u"vic1385"
1019     elif u"x553" in test_name:
1020         nic = u"x553"
1021     elif u"cx556" in test_name or u"cx556a" in test_name:
1022         nic = u"cx556a"
1023     else:
1024         nic = u""
1025
1026     if u"64b" in test_name:
1027         frame_size = u"64b"
1028     elif u"78b" in test_name:
1029         frame_size = u"78b"
1030     elif u"imix" in test_name:
1031         frame_size = u"imix"
1032     elif u"9000b" in test_name:
1033         frame_size = u"9000b"
1034     elif u"1518b" in test_name:
1035         frame_size = u"1518b"
1036     elif u"114b" in test_name:
1037         frame_size = u"114b"
1038     else:
1039         frame_size = u""
1040
1041     if u"1t1c" in test_name or \
1042         (u"-1c-" in test_name and
1043          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1044         cores = u"1t1c"
1045     elif u"2t2c" in test_name or \
1046          (u"-2c-" in test_name and
1047           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1048         cores = u"2t2c"
1049     elif u"4t4c" in test_name or \
1050          (u"-4c-" in test_name and
1051           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1052         cores = u"4t4c"
1053     elif u"2t1c" in test_name or \
1054          (u"-1c-" in test_name and
1055           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1056         cores = u"2t1c"
1057     elif u"4t2c" in test_name or \
1058          (u"-2c-" in test_name and
1059           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1060         cores = u"4t2c"
1061     elif u"8t4c" in test_name or \
1062          (u"-4c-" in test_name and
1063           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1064         cores = u"8t4c"
1065     else:
1066         cores = u""
1067
1068     if u"testpmd" in test_name:
1069         driver = u"testpmd"
1070     elif u"l3fwd" in test_name:
1071         driver = u"l3fwd"
1072     elif u"avf" in test_name:
1073         driver = u"avf"
1074     elif u"rdma" in test_name:
1075         driver = u"rdma"
1076     elif u"dnv" in testbed or u"tsh" in testbed:
1077         driver = u"ixgbe"
1078     else:
1079         driver = u"dpdk"
1080
1081     if u"macip-iacl1s" in test_name:
1082         bsf = u"features-macip-iacl1"
1083     elif u"macip-iacl10s" in test_name:
1084         bsf = u"features-macip-iacl10"
1085     elif u"macip-iacl50s" in test_name:
1086         bsf = u"features-macip-iacl50"
1087     elif u"iacl1s" in test_name:
1088         bsf = u"features-iacl1"
1089     elif u"iacl10s" in test_name:
1090         bsf = u"features-iacl10"
1091     elif u"iacl50s" in test_name:
1092         bsf = u"features-iacl50"
1093     elif u"oacl1s" in test_name:
1094         bsf = u"features-oacl1"
1095     elif u"oacl10s" in test_name:
1096         bsf = u"features-oacl10"
1097     elif u"oacl50s" in test_name:
1098         bsf = u"features-oacl50"
1099     elif u"nat44det" in test_name:
1100         bsf = u"nat44det-bidir"
1101     elif u"nat44ed" in test_name and u"udir" in test_name:
1102         bsf = u"nat44ed-udir"
1103     elif u"-cps" in test_name and u"ethip4udp" in test_name:
1104         bsf = u"udp-cps"
1105     elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1106         bsf = u"tcp-cps"
1107     elif u"-pps" in test_name and u"ethip4udp" in test_name:
1108         bsf = u"udp-pps"
1109     elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1110         bsf = u"tcp-pps"
1111     elif u"-tput" in test_name and u"ethip4udp" in test_name:
1112         bsf = u"udp-tput"
1113     elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1114         bsf = u"tcp-tput"
1115     elif u"udpsrcscale" in test_name:
1116         bsf = u"features-udp"
1117     elif u"iacl" in test_name:
1118         bsf = u"features"
1119     elif u"policer" in test_name:
1120         bsf = u"features"
1121     elif u"adl" in test_name:
1122         bsf = u"features"
1123     elif u"cop" in test_name:
1124         bsf = u"features"
1125     elif u"nat" in test_name:
1126         bsf = u"features"
1127     elif u"macip" in test_name:
1128         bsf = u"features"
1129     elif u"scale" in test_name:
1130         bsf = u"scale"
1131     elif u"base" in test_name:
1132         bsf = u"base"
1133     else:
1134         bsf = u"base"
1135
1136     if u"114b" in test_name and u"vhost" in test_name:
1137         domain = u"vts"
1138     elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1139         domain = u"nat44"
1140         if u"nat44det" in test_name:
1141             domain += u"-det-bidir"
1142         else:
1143             domain += u"-ed"
1144         if u"udir" in test_name:
1145             domain += u"-unidir"
1146         elif u"-ethip4udp-" in test_name:
1147             domain += u"-udp"
1148         elif u"-ethip4tcp-" in test_name:
1149             domain += u"-tcp"
1150         if u"-cps" in test_name:
1151             domain += u"-cps"
1152         elif u"-pps" in test_name:
1153             domain += u"-pps"
1154         elif u"-tput" in test_name:
1155             domain += u"-tput"
1156     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1157         domain = u"dpdk"
1158     elif u"memif" in test_name:
1159         domain = u"container_memif"
1160     elif u"srv6" in test_name:
1161         domain = u"srv6"
1162     elif u"vhost" in test_name:
1163         domain = u"vhost"
1164         if u"vppl2xc" in test_name:
1165             driver += u"-vpp"
1166         else:
1167             driver += u"-testpmd"
1168         if u"lbvpplacp" in test_name:
1169             bsf += u"-link-bonding"
1170     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1171         domain = u"nf_service_density_vnfc"
1172     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1173         domain = u"nf_service_density_cnfc"
1174     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1175         domain = u"nf_service_density_cnfp"
1176     elif u"ipsec" in test_name:
1177         domain = u"ipsec"
1178         if u"sw" in test_name:
1179             bsf += u"-sw"
1180         elif u"hw" in test_name:
1181             bsf += u"-hw"
1182     elif u"ethip4vxlan" in test_name:
1183         domain = u"ip4_tunnels"
1184     elif u"ethip4udpgeneve" in test_name:
1185         domain = u"ip4_tunnels"
1186     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1187         domain = u"ip4"
1188     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1189         domain = u"ip6"
1190     elif u"l2xcbase" in test_name or \
1191             u"l2xcscale" in test_name or \
1192             u"l2bdbasemaclrn" in test_name or \
1193             u"l2bdscale" in test_name or \
1194             u"l2patch" in test_name:
1195         domain = u"l2"
1196     else:
1197         domain = u""
1198
1199     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1200     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1201
1202     return file_name + anchor_name
1203
1204
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the CSV dashboard produced by table_perf_trending_dash and
    renders it as an HTML table embedded in an rST ``raw`` directive.
    Rows are colored red when the regression counter (column 4) is
    non-zero, green when the progression counter (column 5) is non-zero,
    blue otherwise.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # NDR/PDR dashboards link to the ndrpdr trending pages, MRR ones to the
    # default trending pages.
    ndrpdr = test_type in (u"NDR", u"PDR")
    lnk_dir = u"../ndrpdr_trending/" if ndrpdr else u"../trending/"
    lnk_sufix = f"-{test_type.lower()}" if ndrpdr else u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as in_file:
            rows = list(csv.reader(in_file, delimiter=u',', quotechar=u'"'))
    except FileNotFoundError as err:
        logging.warning(f"{err}")
        return
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table root element:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    hdr_row = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, caption in enumerate(rows[0]):
        cell = ET.SubElement(
            hdr_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        cell.text = caption

    # Pairs of alternating background colors per row classification:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    add_links = table.get(u"add-links", True)
    for row_nr, row in enumerate(rows[1:]):
        # Classify the row by its regression / progression counters.
        if int(row[4]):
            classification = u"regression"
        elif int(row[5]):
            classification = u"progression"
        else:
            classification = u"normal"
        tbl_row = ET.SubElement(
            dashboard, u"tr",
            attrib=dict(bgcolor=colors[classification][row_nr % 2])
        )

        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                tbl_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0 and add_links:
                # The test name is turned into a link to its trending plot.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), value)}"
                        f"{lnk_sufix}"
                    )
                )
                link.text = value
            else:
                cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1324
1325
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For every specified build, writes a flat text file with the build
    number, version, pass/fail counters, elapsed time, and the names of the
    failed tests (one item per line).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    # The output is a flat list, not a table: per build the items are build
    # number, version, nr of passed tests, nr of failed tests, duration,
    # followed by the names of all failed tests.
    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
                duration = \
                    input_data.metadata(job, build).get(u"elapsedtime", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            tbl_list.append(build)
            tbl_list.append(version)
            failed_tests = list()
            passed = 0
            failed = 0
            # NOTE(review): ``.values`` without parentheses - presumably
            # ``data[job][build]`` is a pandas Series here so this iterates
            # its values; confirm it is not meant to be the ``.values()``
            # method of a dict (sibling functions use ``.items()``).
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                # Failed tests without a recognizable NIC in the parent
                # suite name are counted, but not listed by name.
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                if not groups:
                    continue
                nic = groups.group(0)
                failed_tests.append(f"{nic}-{tst_data[u'name']}")
            tbl_list.append(passed)
            tbl_list.append(failed)
            tbl_list.append(duration)
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        for test in tbl_list:
            file_handler.write(f"{test}\n")
1389
1390
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    For every test which failed within the configured time window, collects
    the number of failures and the metadata of its most recent failure,
    then writes the result as a CSV file and converts it to a pretty TXT
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # The table filter decides whether this is an MRR or an NDRPDR based
    # table (used only for the CSIT job name in the last column).
    test_type = u"NDRPDR" if u"NDRPDR" in table.get(u"filter", list()) \
        else u"MRR"

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Only builds generated within the sliding time window are considered.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    # tbl_dict maps test name -> {name, data}, where data maps build number
    # to (status, generated timestamp, version, build).
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Tests whose parent suite has no recognizable NIC name
                    # are skipped entirely.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    tbl_dict[tst_name] = {
                        u"name": f"{groups.group(0)}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).
                            get(u"version", u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        # Metadata of the most recent failure:
        # (status, generated, version, build).
        last_fail = (u"", u"", u"", u"")
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                last_fail = val
        if not fails_nr:
            continue
        max_fails = max(max_fails, fails_nr)
        tbl_lst.append([
            tst_data[u"name"],
            fails_nr,
            last_fail[1],
            last_fail[2],
            f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
            f"-build-{last_fail[3]}"
        ])

    # Sort by the last failure time (newest first), then bucket by the
    # number of failures (descending), keeping the time order inside each
    # bucket.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_sorted.extend([item for item in tbl_lst if item[1] == nrf])

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1501
1502
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV produced by table_failed_tests and renders it as an HTML
    table embedded in an rST ``raw`` directive, with the test names
    optionally linked to their trending plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # NDR/PDR based tables link to the ndrpdr trending pages, MRR ones to
    # the default trending pages.
    ndrpdr = test_type in (u"NDRPDR", u"NDR", u"PDR")
    lnk_dir = u"../ndrpdr_trending/" if ndrpdr else u"../trending/"
    lnk_sufix = u"-pdr" if ndrpdr else u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as in_file:
            rows = list(csv.reader(in_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table root element:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    hdr_row = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, caption in enumerate(rows[0]):
        cell = ET.SubElement(
            hdr_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        cell.text = caption

    # Data rows with alternating background colors:
    colors = (u"#e9f1fb", u"#d4e4f7")
    add_links = table.get(u"add-links", True)
    for row_nr, row in enumerate(rows[1:]):
        tbl_row = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=colors[row_nr % 2])
        )

        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                tbl_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0 and add_links:
                # The test name is turned into a link to its trending plot.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), value)}"
                        f"{lnk_sufix}"
                    )
                )
                link.text = value
            else:
                cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1600
1601
def table_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_comparison
    specified in the specification file.

    The table compares mean and standard deviation of test results across
    the specified data sets (columns). Optional "comparisons" add relative
    difference columns, and optional RCA (Root Cause Analysis) yaml files
    add per-test references. The result is written as csv, txt and html
    files.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    columns = table.get(u"columns", None)
    if not columns:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    # Collect per-column data: for each column gather {test-name: samples}.
    cols = list()
    for idx, col in enumerate(columns):
        if col.get(u"data-set", None) is None:
            logging.warning(f"No data for column {col.get(u'title', u'')}")
            continue
        tag = col.get(u"tag", None)
        data = input_data.filter_data(
            table,
            params=[
                u"throughput",
                u"result",
                u"latency",
                u"name",
                u"parent",
                u"tags"
            ],
            data=col[u"data-set"],
            continue_on_error=True
        )
        col_data = {
            u"title": col.get(u"title", f"Column{idx}"),
            u"data": dict()
        }
        for builds in data.values:
            for build in builds:
                for tst_name, tst_data in build.items():
                    # Optional tag filter restricts the tests in a column.
                    if tag and tag not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = \
                        _tpc_modify_test_name(tst_name, ignore_nic=True).\
                        replace(u"2n1l-", u"")
                    if col_data[u"data"].get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        # u"replace" marks that this entry may still be
                        # overwritten by the "data-replacement" data set.
                        col_data[u"data"][tst_name_mod] = {
                            u"name": name,
                            u"replace": True,
                            u"data": list(),
                            u"mean": None,
                            u"stdev": None
                        }
                    _tpc_insert_data(
                        target=col_data[u"data"][tst_name_mod],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

        # Optional replacement data set: tests present in it replace the
        # samples collected above (first hit clears the original samples).
        replacement = col.get(u"data-replacement", None)
        if replacement:
            rpl_data = input_data.filter_data(
                table,
                params=[
                    u"throughput",
                    u"result",
                    u"latency",
                    u"name",
                    u"parent",
                    u"tags"
                ],
                data=replacement,
                continue_on_error=True
            )
            for builds in rpl_data.values:
                for build in builds:
                    for tst_name, tst_data in build.items():
                        if tag and tag not in tst_data[u"tags"]:
                            continue
                        tst_name_mod = \
                            _tpc_modify_test_name(tst_name, ignore_nic=True).\
                            replace(u"2n1l-", u"")
                        if col_data[u"data"].get(tst_name_mod, None) is None:
                            name = tst_data[u'name'].rsplit(u'-', 1)[0]
                            if u"across testbeds" in table[u"title"].lower() \
                                    or u"across topologies" in \
                                    table[u"title"].lower():
                                name = _tpc_modify_displayed_test_name(name)
                            col_data[u"data"][tst_name_mod] = {
                                u"name": name,
                                u"replace": False,
                                u"data": list(),
                                u"mean": None,
                                u"stdev": None
                            }
                        if col_data[u"data"][tst_name_mod][u"replace"]:
                            col_data[u"data"][tst_name_mod][u"replace"] = False
                            col_data[u"data"][tst_name_mod][u"data"] = list()
                        _tpc_insert_data(
                            target=col_data[u"data"][tst_name_mod],
                            src=tst_data,
                            include_tests=table[u"include-tests"]
                        )

        if table[u"include-tests"] in (u"NDR", u"PDR") or \
                u"latency" in table[u"include-tests"]:
            for tst_name, tst_data in col_data[u"data"].items():
                if tst_data[u"data"]:
                    tst_data[u"mean"] = mean(tst_data[u"data"])
                    tst_data[u"stdev"] = stdev(tst_data[u"data"])

        cols.append(col_data)

    # Re-group the data test-wise: {test-name: {column-title: {mean, stdev}}}.
    tbl_dict = dict()
    for col in cols:
        for tst_name, tst_data in col[u"data"].items():
            if tbl_dict.get(tst_name, None) is None:
                tbl_dict[tst_name] = {
                    "name": tst_data[u"name"]
                }
            tbl_dict[tst_name][col[u"title"]] = {
                u"mean": tst_data[u"mean"],
                u"stdev": tst_data[u"stdev"]
            }

    if not tbl_dict:
        logging.warning(f"No data for table {table.get(u'title', u'')}!")
        return

    # One row per test: [name, col1-data, col2-data, ...]
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        row = [tst_data[u"name"], ]
        for col in cols:
            row.append(tst_data.get(col[u"title"], None))
        tbl_lst.append(row)

    # Validate the comparisons and read the RCA files.
    # Note: invalid entries are filtered into a new list instead of being
    # popped while iterating (popping during iteration skipped the element
    # following each removed one and mis-aligned rcas with comparisons).
    comparisons = table.get(u"comparisons", None)
    rcas = list()
    if comparisons and isinstance(comparisons, list):
        valid_comparisons = list()
        for idx, comp in enumerate(comparisons):
            try:
                col_ref = int(comp[u"reference"])
                col_cmp = int(comp[u"compare"])
            except KeyError:
                logging.warning(u"Comparison: No references defined! Skipping.")
                continue
            # Precedence kept from the original: (both in range) or
            # (ref == cmp). NOTE(review): the "ref == cmp" escape looks
            # suspicious (compares a column to itself) — confirm intent.
            if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
                    col_ref == col_cmp):
                logging.warning(f"Wrong values of reference={col_ref} "
                                f"and/or compare={col_cmp}. Skipping.")
                continue
            valid_comparisons.append(comp)
            rca_file_name = comp.get(u"rca-file", None)
            if rca_file_name:
                try:
                    with open(rca_file_name, u"r") as file_handler:
                        rcas.append(
                            {
                                u"title": f"RCA{idx + 1}",
                                u"data": load(file_handler, Loader=FullLoader)
                            }
                        )
                except (YAMLError, IOError) as err:
                    logging.warning(
                        f"The RCA file {rca_file_name} does not exist or "
                        f"it is corrupted!"
                    )
                    logging.debug(repr(err))
                    rcas.append(None)
            else:
                rcas.append(None)
        comparisons = valid_comparisons
    else:
        # Use an empty list, not None, so the loops below iterating over
        # comparisons do not raise TypeError when none are specified.
        comparisons = list()

    # Compute the relative differences; a row is kept only if every
    # comparison could be computed (the for/else drops incomplete rows).
    tbl_cmp_lst = list()
    if comparisons:
        for row in tbl_lst:
            new_row = deepcopy(row)
            for comp in comparisons:
                ref_itm = row[int(comp[u"reference"])]
                if ref_itm is None and \
                        comp.get(u"reference-alt", None) is not None:
                    ref_itm = row[int(comp[u"reference-alt"])]
                cmp_itm = row[int(comp[u"compare"])]
                if ref_itm is not None and cmp_itm is not None and \
                        ref_itm[u"mean"] is not None and \
                        cmp_itm[u"mean"] is not None and \
                        ref_itm[u"stdev"] is not None and \
                        cmp_itm[u"stdev"] is not None:
                    try:
                        delta, d_stdev = relative_change_stdev(
                            ref_itm[u"mean"], cmp_itm[u"mean"],
                            ref_itm[u"stdev"], cmp_itm[u"stdev"]
                        )
                    except ZeroDivisionError:
                        break
                    if delta is None or math.isnan(delta):
                        break
                    new_row.append({
                        u"mean": delta * 1e6,
                        u"stdev": d_stdev * 1e6
                    })
                else:
                    break
            else:
                tbl_cmp_lst.append(new_row)

    try:
        tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
        tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
    except TypeError as err:
        logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")

    if not tbl_cmp_lst:
        # Without any comparable row the code below has nothing to write
        # (and indexing tbl_cmp_lst[0] would raise IndexError).
        logging.warning(f"No data for table {table.get(u'title', u'')}!")
        return

    # Full-precision csv table (values in Mpps, 3 decimal places).
    tbl_for_csv = list()
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or\
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                row.append(u"NT")
                row.append(u"NT")
            else:
                row.append(round(float(itm[u'mean']) / 1e6, 3))
                row.append(round(float(itm[u'stdev']) / 1e6, 3))
        for rca in rcas:
            if rca is None:
                continue
            rca_nr = rca[u"data"].get(row[0], u"-")
            row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        tbl_for_csv.append(row)

    header_csv = [u"Test Case", ]
    for col in cols:
        header_csv.append(f"Avg({col[u'title']})")
        header_csv.append(f"Stdev({col[u'title']})")
    for comp in comparisons:
        header_csv.append(
            f"Avg({comp.get(u'title', u'')})"
        )
        header_csv.append(
            f"Stdev({comp.get(u'title', u'')})"
        )
    for rca in rcas:
        if rca:
            header_csv.append(rca[u"title"])

    legend_lst = table.get(u"legend", None)
    if legend_lst is None:
        legend = u""
    else:
        legend = u"\n" + u"\n".join(legend_lst) + u"\n"

    footnote = u""
    if rcas and any(rcas):
        footnote += u"\nRoot Cause Analysis:\n"
        for rca in rcas:
            if rca:
                footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"

    csv_file_name = f"{table[u'output-file']}-csv.csv"
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(
            u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
        )
        for test in tbl_for_csv:
            file_handler.write(
                u",".join([f'"{item}"' for item in test]) + u"\n"
            )
        if legend_lst:
            for item in legend_lst:
                file_handler.write(f'"{item}"\n')
        if footnote:
            for itm in footnote.split(u"\n"):
                file_handler.write(f'"{itm}"\n')

    # Pretty-printed table: "mean ±stdev" strings (diff columns signed),
    # tracking the max width of the stdev part per column for alignment.
    tbl_tmp = list()
    max_lens = [0, ] * len(tbl_cmp_lst[0])
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or \
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                new_itm = u"NT"
            else:
                if idx < len(cols):
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 1)} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
                        replace(u"nan", u"NaN")
                    )
                else:
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 1):+} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
                        replace(u"nan", u"NaN")
                    )
            if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
                max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
            row.append(new_itm)

        tbl_tmp.append(row)

    header = [u"Test Case", ]
    header.extend([col[u"title"] for col in cols])
    header.extend([comp.get(u"title", u"") for comp in comparisons])

    # Right-align the stdev parts and prepend the RCA references to the
    # diff columns.
    tbl_final = list()
    for line in tbl_tmp:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm in (u"NT", u"NaN"):
                row.append(itm)
                continue
            itm_lst = itm.rsplit(u"\u00B1", 1)
            itm_lst[-1] = \
                f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
            itm_str = u"\u00B1".join(itm_lst)

            if idx >= len(cols):
                # Diffs
                rca = rcas[idx - len(cols)]
                if rca:
                    # Add rcas to diffs
                    rca_nr = rca[u"data"].get(row[0], None)
                    if rca_nr:
                        hdr_len = len(header[idx + 1]) - 1
                        if hdr_len < 19:
                            hdr_len = 19
                        rca_nr = f"[{rca_nr}]"
                        itm_str = (
                            f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
                            f"{u' ' * (hdr_len - 4 - len(itm_str))}"
                            f"{itm_str}"
                        )
            row.append(itm_str)
        tbl_final.append(row)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(u";".join(header) + u"\n")
        for test in tbl_final:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    # Generate txt table:
    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")

    with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
        file_handler.write(legend)
        file_handler.write(footnote)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_final,
        table[u'output-file'],
        legend=legend,
        footnote=footnote,
        sort_data=False,
        title=table.get(u"title", u"")
    )
1983
1984
def table_weekly_comparison(table, in_data):
    """Generate the table(s) with algorithm: table_weekly_comparison
    specified in the specification file.

    Builds a table with the NDR or PDR throughput of the last
    ``nr-of-data-columns`` builds (one column per build), appends
    relative-change columns for the configured comparisons, and writes
    the result as csv, txt and html files.

    :param table: Table to generate.
    :param in_data: Data to process.
    :type table: pandas.Series
    :type in_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    # Only NDR or PDR throughput is supported by this algorithm.
    incl_tests = table.get(u"include-tests", None)
    if incl_tests not in (u"NDR", u"PDR"):
        logging.error(f"Wrong tests to include specified ({incl_tests}).")
        return

    nr_cols = table.get(u"nr-of-data-columns", None)
    if not nr_cols or nr_cols < 2:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    data = in_data.filter_data(
        table,
        params=[u"throughput", u"result", u"name", u"parent", u"tags"],
        continue_on_error=True
    )

    # Four header rows; per-build values are inserted at position 1 below,
    # so each later build ends up closer to the row label (i.e. the build
    # order in the header is the reverse of the processing order).
    header = [
        [u"VPP Version", ],
        [u"Start Timestamp", ],
        [u"CSIT Build", ],
        [u"CSIT Testbed", ]
    ]
    tbl_dict = dict()
    idx = 0  # number of non-empty builds processed so far (max nr_cols)
    tb_tbl = table.get(u"testbeds", None)  # optional {testbed-ip: name} map
    for job_name, job_data in data.items():
        for build_nr, build in job_data.items():
            if idx >= nr_cols:
                break
            if build.empty:
                continue

            tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            else:
                testbed = u""
            header[2].insert(1, build_nr)
            header[3].insert(1, testbed)
            header[1].insert(
                1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
            )
            header[0].insert(
                1, in_data.metadata(job_name, build_nr).get(u"version", u"")
            )

            for tst_name, tst_data in build.items():
                tst_name_mod = \
                    _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
                if not tbl_dict.get(tst_name_mod, None):
                    tbl_dict[tst_name_mod] = dict(
                        name=tst_data[u'name'].rsplit(u'-', 1)[0],
                    )
                try:
                    # Throughput of the i-th processed build is stored
                    # under the negative key -(i+1); missing/invalid data
                    # is silently skipped (best-effort collection).
                    tbl_dict[tst_name_mod][-idx - 1] = \
                        tst_data[u"throughput"][incl_tests][u"LOWER"]
                except (TypeError, IndexError, KeyError, ValueError):
                    pass
            idx += 1

    if idx < nr_cols:
        logging.error(u"Not enough data to build the table! Skipping")
        return

    # Relative-change columns; cmp_dict maps test name to one value per
    # comparison (nan when either side is missing).
    cmp_dict = dict()
    for idx, cmp in enumerate(table.get(u"comparisons", list())):
        idx_ref = cmp.get(u"reference", None)
        idx_cmp = cmp.get(u"compare", None)
        if idx_ref is None or idx_cmp is None:
            continue
        # NOTE(review): "- idx" compensates for the Diff labels appended
        # to header[0] by previous iterations of this loop — verify the
        # offset if the comparison list format changes.
        header[0].append(
            f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
            f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
        )
        header[1].append(u"")
        header[2].append(u"")
        header[3].append(u"")
        for tst_name, tst_data in tbl_dict.items():
            if not cmp_dict.get(tst_name, None):
                cmp_dict[tst_name] = list()
            ref_data = tst_data.get(idx_ref, None)
            cmp_data = tst_data.get(idx_cmp, None)
            if ref_data is None or cmp_data is None:
                cmp_dict[tst_name].append(float(u'nan'))
            else:
                cmp_dict[tst_name].append(
                    relative_change(ref_data, cmp_data)
                )

    # Build data rows; insert(1, ...) reverses the per-build values so
    # they line up with the header column order. Rows whose last diff is
    # missing go to tbl_lst_none and are appended after the sorted rows.
    tbl_lst_none = list()
    tbl_lst = list()
    for tst_name, tst_data in tbl_dict.items():
        itm_lst = [tst_data[u"name"], ]
        for idx in range(nr_cols):
            item = tst_data.get(-idx - 1, None)
            if item is None:
                itm_lst.insert(1, None)
            else:
                # Convert to Mpps, one decimal place.
                itm_lst.insert(1, round(item / 1e6, 1))
        itm_lst.extend(
            [
                None if itm is None else round(itm, 1)
                for itm in cmp_dict[tst_name]
            ]
        )
        if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
            tbl_lst_none.append(itm_lst)
        else:
            tbl_lst.append(itm_lst)

    # Sort by name first, then (stable sort) by the last diff column.
    tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_lst.extend(tbl_lst_none)

    # Generate csv table:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        for hdr in header:
            file_handler.write(u",".join(hdr) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join(
                [
                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
                    replace(u"null", u"-") for item in test
                ]
            ) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")

    # Reorganize header in txt table
    txt_table = list()
    with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
        for line in list(file_handler):
            txt_table.append(line)
    try:
        # Move the third text line below the header separator produced by
        # convert_csv_to_pretty_txt; ignored when the table is too short.
        txt_table.insert(5, txt_table.pop(2))
        with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
            file_handler.writelines(txt_table)
    except IndexError:
        pass

    # Generate html table:
    hdr_html = [
        u"<br>".join(row) for row in zip(*header)
    ]
    _tpc_generate_html_table(
        hdr_html,
        tbl_lst,
        table[u'output-file'],
        sort_data=True,
        title=table.get(u"title", u""),
        generate_rst=False
    )