8218084f7187728ae9a988c0aa0617a3591cb844
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
27 from json import loads
28
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
31 import pandas as pd
32
33 from numpy import nan, isnan
34 from yaml import load, FullLoader, YAMLError
35
36 from pal_utils import mean, stdev, classify_anomalies, \
37     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
38
39
40 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
41
42
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table specification is dispatched to the generator function
    matching its ``algorithm`` field. A failure to dispatch one table is
    logged and does not abort the generation of the remaining tables.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name --> generator function.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            if table[u"algorithm"] == u"table_weekly_comparison":
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            generator[table[u"algorithm"]](table, data)
        except (NameError, KeyError) as err:
            # KeyError: the algorithm is not present in the dispatch dict
            # (the original handler caught only NameError, so an unknown
            # algorithm aborted the whole loop); NameError: the generator
            # function itself is not defined.
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
77
78
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    One ``.rst`` file (containing raw HTML tables) is written per suite,
    embedding the operational data (``show-run``) of every test belonging
    to that suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    # Keep only the fields needed to build the tables.
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optionally sort the tests by name.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    # Suites group the generated files; one output file per suite.
    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Background colors: title/header rows, spacer rows, and the two
        # alternating body-row shades.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row: the test name spanning all six columns.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # No operational data for this test: emit a stub table and return.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            # White-on-white dot keeps the trailing spacer row rendered.
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column headers of each per-thread sub-table.
        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT in the show-run data.
        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"runtime", None) is None:
                tcol.text = u"No Data"
                continue

            # The runtime counters are stored as a JSON string.
            runtime = loads(dut_data[u"runtime"])

            # The number of threads is taken from the first node's counter
            # arrays; missing/empty data degrades to "No Data".
            try:
                threads_nr = len(runtime[0][u"clocks"])
            except (IndexError, KeyError):
                tcol.text = u"No Data"
                continue

            # Re-group the per-node counters by thread index.
            threads = OrderedDict({idx: list() for idx in range(threads_nr)})
            for item in runtime:
                for idx in range(threads_nr):
                    # Clocks per unit of work; the first non-zero counter
                    # (vectors, then calls, then suspends) is the divisor.
                    if item[u"vectors"][idx] > 0:
                        clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
                    elif item[u"calls"][idx] > 0:
                        clocks = item[u"clocks"][idx] / item[u"calls"][idx]
                    elif item[u"suspends"][idx] > 0:
                        clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
                    else:
                        clocks = 0.0

                    # Average vector size = vectors per call.
                    if item[u"calls"][idx] > 0:
                        vectors_call = item[u"vectors"][idx] / item[u"calls"][
                            idx]
                    else:
                        vectors_call = 0.0

                    # Skip completely idle nodes (all counters zero).
                    if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
                        int(item[u"suspends"][idx]):
                        threads[idx].append([
                            item[u"name"],
                            item[u"calls"][idx],
                            item[u"vectors"][idx],
                            item[u"suspends"][idx],
                            clocks,
                            vectors_call
                        ])

            # DUT identification row followed by a spacer.
            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            # One sub-table per thread: title row, header row, data rows,
            # trailing spacer.
            for thread_nr, thread in threads.items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                # Thread 0 is "main"; the rest are workers.
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                # Header cells: first column left-aligned, rest right-aligned.
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # Data rows with alternating background colors.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        # Floats (clocks, vectors per call) are rounded to
                        # two decimal places.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Trailing spacer with an invisible dot (see stub case above).
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Write one rst file per suite, concatenating the tables of all its
    # tests.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            # NOTE(review): substring match, not equality — presumably
            # intentional to match against the full suite path; confirm.
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            # u"output-file" missing in the specification.
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
318
319
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    One CSV file is written per suite, with one row per passed test and
    one column per entry of ``table[u"columns"]``.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optionally sort the tests by name.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; double quotes are escaped by
    # doubling, per CSV convention.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            # Include only passed tests that belong to this suite
            # (substring match against the suite name).
            if data[test][u"status"] != u"PASS" or \
                    data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column[u"data"] is e.g. u"data name"; the second word
                    # is the key into the test-data dict.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Long test names are broken into two lines with an
                        # rst line break at roughly the middle dash.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u" |br| ", 1)[1]
                            except IndexError:
                                pass
                        col_data = col_data.replace(u'\n', u' |br| ').\
                            replace(u'\r', u'').replace(u'"', u"'")
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u'\n', u' |br| ')
                        # The last 5 characters are dropped — presumably a
                        # trailing line-break marker remnant; confirm.
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # Only complete rows are kept; a skipped u"Test Failed" column
            # above leaves the row short and thus drops the whole test.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
417
418
419 def _tpc_modify_test_name(test_name, ignore_nic=False):
420     """Modify a test name by replacing its parts.
421
422     :param test_name: Test name to be modified.
423     :param ignore_nic: If True, NIC is removed from TC name.
424     :type test_name: str
425     :type ignore_nic: bool
426     :returns: Modified test name.
427     :rtype: str
428     """
429     test_name_mod = test_name.\
430         replace(u"-ndrpdr", u"").\
431         replace(u"1t1c", u"1c").\
432         replace(u"2t1c", u"1c"). \
433         replace(u"2t2c", u"2c").\
434         replace(u"4t2c", u"2c"). \
435         replace(u"4t4c", u"4c").\
436         replace(u"8t4c", u"4c")
437
438     if ignore_nic:
439         return re.sub(REGEX_NIC, u"", test_name_mod)
440     return test_name_mod
441
442
443 def _tpc_modify_displayed_test_name(test_name):
444     """Modify a test name which is displayed in a table by replacing its parts.
445
446     :param test_name: Test name to be modified.
447     :type test_name: str
448     :returns: Modified test name.
449     :rtype: str
450     """
451     return test_name.\
452         replace(u"1t1c", u"1c").\
453         replace(u"2t1c", u"1c"). \
454         replace(u"2t2c", u"2c").\
455         replace(u"4t2c", u"2c"). \
456         replace(u"4t4c", u"4c").\
457         replace(u"8t4c", u"4c")
458
459
460 def _tpc_insert_data(target, src, include_tests):
461     """Insert src data to the target structure.
462
463     :param target: Target structure where the data is placed.
464     :param src: Source data to be placed into the target structure.
465     :param include_tests: Which results will be included (MRR, NDR, PDR).
466     :type target: list
467     :type src: dict
468     :type include_tests: str
469     """
470     try:
471         if include_tests == u"MRR":
472             target[u"mean"] = src[u"result"][u"receive-rate"]
473             target[u"stdev"] = src[u"result"][u"receive-stdev"]
474         elif include_tests == u"PDR":
475             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
476         elif include_tests == u"NDR":
477             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
478     except (KeyError, TypeError):
479         pass
480
481
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    # Position of the u"Test Case" column; used as the secondary sort key
    # and to pick the alignment/width presets below.
    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    # Alignment and column-width presets, indexed by the position of the
    # u"Test Case" column (i.e. by the number of leading columns).
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        # One pre-sorted view per column, ascending first ...
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        # ... then one per column, descending; together they back the
        # dropdown entries built below (same order as menu_items).
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    # Alternating row background colors.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        # One go.Table trace per pre-sorted view; the dropdown toggles
        # trace visibility to "sort" client-side.
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        # Build one dropdown button per sorted view; each button shows
        # exactly one trace.
        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    # Initially select the last entry (descending sort by
                    # the last header column).
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        # Single, unsorted table.
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    # Write the standalone interactive HTML file.
    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    # Wrap the HTML table in an rst file embedding it via an iframe.
    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        # rst substitutions used by legend/footnote text.
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )

        # Legend/footnote: first line as a paragraph, remaining lines as a
        # bullet list.
        if legend:
            try:
                itm_lst = legend[1:-2].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
        if footnote:
            try:
                itm_lst = footnote[1:].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")
668
669
670 def table_soak_vs_ndr(table, input_data):
671     """Generate the table(s) with algorithm: table_soak_vs_ndr
672     specified in the specification file.
673
674     :param table: Table to generate.
675     :param input_data: Data to process.
676     :type table: pandas.Series
677     :type input_data: InputData
678     """
679
680     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
681
682     # Transform the data
683     logging.info(
684         f"    Creating the data set for the {table.get(u'type', u'')} "
685         f"{table.get(u'title', u'')}."
686     )
687     data = input_data.filter_data(table, continue_on_error=True)
688
689     # Prepare the header of the table
690     try:
691         header = [
692             u"Test Case",
693             f"Avg({table[u'reference'][u'title']})",
694             f"Stdev({table[u'reference'][u'title']})",
695             f"Avg({table[u'compare'][u'title']})",
696             f"Stdev{table[u'compare'][u'title']})",
697             u"Diff",
698             u"Stdev(Diff)"
699         ]
700         header_str = u";".join(header) + u"\n"
701         legend = (
702             u"\nLegend:\n"
703             f"Avg({table[u'reference'][u'title']}): "
704             f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
705             f"from a series of runs of the listed tests.\n"
706             f"Stdev({table[u'reference'][u'title']}): "
707             f"Standard deviation value of {table[u'reference'][u'title']} "
708             f"[Mpps] computed from a series of runs of the listed tests.\n"
709             f"Avg({table[u'compare'][u'title']}): "
710             f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
711             f"a series of runs of the listed tests.\n"
712             f"Stdev({table[u'compare'][u'title']}): "
713             f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
714             f"computed from a series of runs of the listed tests.\n"
715             f"Diff({table[u'reference'][u'title']},"
716             f"{table[u'compare'][u'title']}): "
717             f"Percentage change calculated for mean values.\n"
718             u"Stdev(Diff): "
719             u"Standard deviation of percentage change calculated for mean "
720             u"values."
721         )
722     except (AttributeError, KeyError) as err:
723         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
724         return
725
726     # Create a list of available SOAK test results:
727     tbl_dict = dict()
728     for job, builds in table[u"compare"][u"data"].items():
729         for build in builds:
730             for tst_name, tst_data in data[job][str(build)].items():
731                 if tst_data[u"type"] == u"SOAK":
732                     tst_name_mod = tst_name.replace(u"-soak", u"")
733                     if tbl_dict.get(tst_name_mod, None) is None:
734                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
735                         nic = groups.group(0) if groups else u""
736                         name = (
737                             f"{nic}-"
738                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
739                         )
740                         tbl_dict[tst_name_mod] = {
741                             u"name": name,
742                             u"ref-data": list(),
743                             u"cmp-data": list()
744                         }
745                     try:
746                         tbl_dict[tst_name_mod][u"cmp-data"].append(
747                             tst_data[u"throughput"][u"LOWER"])
748                     except (KeyError, TypeError):
749                         pass
750     tests_lst = tbl_dict.keys()
751
752     # Add corresponding NDR test results:
753     for job, builds in table[u"reference"][u"data"].items():
754         for build in builds:
755             for tst_name, tst_data in data[job][str(build)].items():
756                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
757                     replace(u"-mrr", u"")
758                 if tst_name_mod not in tests_lst:
759                     continue
760                 try:
761                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
762                         continue
763                     if table[u"include-tests"] == u"MRR":
764                         result = (tst_data[u"result"][u"receive-rate"],
765                                   tst_data[u"result"][u"receive-stdev"])
766                     elif table[u"include-tests"] == u"PDR":
767                         result = \
768                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
769                     elif table[u"include-tests"] == u"NDR":
770                         result = \
771                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
772                     else:
773                         result = None
774                     if result is not None:
775                         tbl_dict[tst_name_mod][u"ref-data"].append(
776                             result)
777                 except (KeyError, TypeError):
778                     continue
779
780     tbl_lst = list()
781     for tst_name in tbl_dict:
782         item = [tbl_dict[tst_name][u"name"], ]
783         data_r = tbl_dict[tst_name][u"ref-data"]
784         if data_r:
785             if table[u"include-tests"] == u"MRR":
786                 data_r_mean = data_r[0][0]
787                 data_r_stdev = data_r[0][1]
788             else:
789                 data_r_mean = mean(data_r)
790                 data_r_stdev = stdev(data_r)
791             item.append(round(data_r_mean / 1e6, 1))
792             item.append(round(data_r_stdev / 1e6, 1))
793         else:
794             data_r_mean = None
795             data_r_stdev = None
796             item.extend([None, None])
797         data_c = tbl_dict[tst_name][u"cmp-data"]
798         if data_c:
799             if table[u"include-tests"] == u"MRR":
800                 data_c_mean = data_c[0][0]
801                 data_c_stdev = data_c[0][1]
802             else:
803                 data_c_mean = mean(data_c)
804                 data_c_stdev = stdev(data_c)
805             item.append(round(data_c_mean / 1e6, 1))
806             item.append(round(data_c_stdev / 1e6, 1))
807         else:
808             data_c_mean = None
809             data_c_stdev = None
810             item.extend([None, None])
811         if data_r_mean is not None and data_c_mean is not None:
812             delta, d_stdev = relative_change_stdev(
813                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
814             try:
815                 item.append(round(delta))
816             except ValueError:
817                 item.append(delta)
818             try:
819                 item.append(round(d_stdev))
820             except ValueError:
821                 item.append(d_stdev)
822             tbl_lst.append(item)
823
824     # Sort the table according to the relative change
825     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
826
827     # Generate csv tables:
828     csv_file_name = f"{table[u'output-file']}.csv"
829     with open(csv_file_name, u"wt") as file_handler:
830         file_handler.write(header_str)
831         for test in tbl_lst:
832             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
833
834     convert_csv_to_pretty_txt(
835         csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
836     )
837     with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
838         file_handler.write(legend)
839
840     # Generate html table:
841     _tpc_generate_html_table(
842         header,
843         tbl_lst,
844         table[u'output-file'],
845         legend=legend,
846         title=table.get(u"title", u"")
847     )
848
849
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    For each test the table lists the current trend [Mpps], the short-term
    and long-term relative changes of the trend and the numbers of
    regressions and progressions detected within the short-term window.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data to the table; each test maps to its displayed name
    # (prefixed by the NIC taken from the parent suite name) and an
    # ordered build -> measured value dictionary.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to evaluate a change.
        if len(data_t) < 2:
            continue

        classification_lst, avgs, _ = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        # Best trend value in the long-term window (excluding the
        # short-term window); NaN when there is no valid sample there.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            # Skip tests with incomplete data (NaN average or relative
            # change). This single guard also covers the case of both
            # relative changes being NaN.
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

    # Stable sorts compose: the resulting order is by short-term change,
    # then long-term change, then test name.
    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[2])

    # Order the table primarily by the number of regressions, then by the
    # number of progressions, both descending. A single stable sort
    # produces the same ordering as the original O(window^2) filter loops.
    tbl_sorted = sorted(tbl_lst, key=lambda item: (-item[4], -item[5]))

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
979
980
981 def _generate_url(testbed, test_name):
982     """Generate URL to a trending plot from the name of the test case.
983
984     :param testbed: The testbed used for testing.
985     :param test_name: The name of the test case.
986     :type testbed: str
987     :type test_name: str
988     :returns: The URL to the plot with the trending data for the given test
989         case.
990     :rtype str
991     """
992
993     if u"x520" in test_name:
994         nic = u"x520"
995     elif u"x710" in test_name:
996         nic = u"x710"
997     elif u"xl710" in test_name:
998         nic = u"xl710"
999     elif u"xxv710" in test_name:
1000         nic = u"xxv710"
1001     elif u"vic1227" in test_name:
1002         nic = u"vic1227"
1003     elif u"vic1385" in test_name:
1004         nic = u"vic1385"
1005     elif u"x553" in test_name:
1006         nic = u"x553"
1007     elif u"cx556" in test_name or u"cx556a" in test_name:
1008         nic = u"cx556a"
1009     else:
1010         nic = u""
1011
1012     if u"64b" in test_name:
1013         frame_size = u"64b"
1014     elif u"78b" in test_name:
1015         frame_size = u"78b"
1016     elif u"imix" in test_name:
1017         frame_size = u"imix"
1018     elif u"9000b" in test_name:
1019         frame_size = u"9000b"
1020     elif u"1518b" in test_name:
1021         frame_size = u"1518b"
1022     elif u"114b" in test_name:
1023         frame_size = u"114b"
1024     else:
1025         frame_size = u""
1026
1027     if u"1t1c" in test_name or \
1028         (u"-1c-" in test_name and
1029          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1030         cores = u"1t1c"
1031     elif u"2t2c" in test_name or \
1032          (u"-2c-" in test_name and
1033           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1034         cores = u"2t2c"
1035     elif u"4t4c" in test_name or \
1036          (u"-4c-" in test_name and
1037           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1038         cores = u"4t4c"
1039     elif u"2t1c" in test_name or \
1040          (u"-1c-" in test_name and
1041           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1042         cores = u"2t1c"
1043     elif u"4t2c" in test_name or \
1044          (u"-2c-" in test_name and
1045           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1046         cores = u"4t2c"
1047     elif u"8t4c" in test_name or \
1048          (u"-4c-" in test_name and
1049           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1050         cores = u"8t4c"
1051     else:
1052         cores = u""
1053
1054     if u"testpmd" in test_name:
1055         driver = u"testpmd"
1056     elif u"l3fwd" in test_name:
1057         driver = u"l3fwd"
1058     elif u"avf" in test_name:
1059         driver = u"avf"
1060     elif u"rdma" in test_name:
1061         driver = u"rdma"
1062     elif u"dnv" in testbed or u"tsh" in testbed:
1063         driver = u"ixgbe"
1064     else:
1065         driver = u"dpdk"
1066
1067     if u"macip-iacl1s" in test_name:
1068         bsf = u"features-macip-iacl1"
1069     elif u"macip-iacl10s" in test_name:
1070         bsf = u"features-macip-iacl10"
1071     elif u"macip-iacl50s" in test_name:
1072         bsf = u"features-macip-iacl50"
1073     elif u"iacl1s" in test_name:
1074         bsf = u"features-iacl1"
1075     elif u"iacl10s" in test_name:
1076         bsf = u"features-iacl10"
1077     elif u"iacl50s" in test_name:
1078         bsf = u"features-iacl50"
1079     elif u"oacl1s" in test_name:
1080         bsf = u"features-oacl1"
1081     elif u"oacl10s" in test_name:
1082         bsf = u"features-oacl10"
1083     elif u"oacl50s" in test_name:
1084         bsf = u"features-oacl50"
1085     elif u"nat44det" in test_name:
1086         bsf = u"nat44det-bidir"
1087     elif u"nat44ed" in test_name and u"udir" in test_name:
1088         bsf = u"nat44ed-udir"
1089     elif u"-cps" in test_name and u"ethip4udp" in test_name:
1090         bsf = u"udp-cps"
1091     elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1092         bsf = u"tcp-cps"
1093     elif u"-pps" in test_name and u"ethip4udp" in test_name:
1094         bsf = u"udp-pps"
1095     elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1096         bsf = u"tcp-pps"
1097     elif u"-tput" in test_name and u"ethip4udp" in test_name:
1098         bsf = u"udp-tput"
1099     elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1100         bsf = u"tcp-tput"
1101     elif u"udpsrcscale" in test_name:
1102         bsf = u"features-udp"
1103     elif u"iacl" in test_name:
1104         bsf = u"features"
1105     elif u"policer" in test_name:
1106         bsf = u"features"
1107     elif u"adl" in test_name:
1108         bsf = u"features"
1109     elif u"cop" in test_name:
1110         bsf = u"features"
1111     elif u"nat" in test_name:
1112         bsf = u"features"
1113     elif u"macip" in test_name:
1114         bsf = u"features"
1115     elif u"scale" in test_name:
1116         bsf = u"scale"
1117     elif u"base" in test_name:
1118         bsf = u"base"
1119     else:
1120         bsf = u"base"
1121
1122     if u"114b" in test_name and u"vhost" in test_name:
1123         domain = u"vts"
1124     elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1125         domain = u"nat44"
1126         if u"nat44det" in test_name:
1127             domain += u"-det-bidir"
1128         else:
1129             domain += u"-ed"
1130         if u"udir" in test_name:
1131             domain += u"-unidir"
1132         elif u"-ethip4udp-" in test_name:
1133             domain += u"-udp"
1134         elif u"-ethip4tcp-" in test_name:
1135             domain += u"-tcp"
1136         if u"-cps" in test_name:
1137             domain += u"-cps"
1138         elif u"-pps" in test_name:
1139             domain += u"-pps"
1140         elif u"-tput" in test_name:
1141             domain += u"-tput"
1142     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1143         domain = u"dpdk"
1144     elif u"memif" in test_name:
1145         domain = u"container_memif"
1146     elif u"srv6" in test_name:
1147         domain = u"srv6"
1148     elif u"vhost" in test_name:
1149         domain = u"vhost"
1150         if u"vppl2xc" in test_name:
1151             driver += u"-vpp"
1152         else:
1153             driver += u"-testpmd"
1154         if u"lbvpplacp" in test_name:
1155             bsf += u"-link-bonding"
1156     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1157         domain = u"nf_service_density_vnfc"
1158     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1159         domain = u"nf_service_density_cnfc"
1160     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1161         domain = u"nf_service_density_cnfp"
1162     elif u"ipsec" in test_name:
1163         domain = u"ipsec"
1164         if u"sw" in test_name:
1165             bsf += u"-sw"
1166         elif u"hw" in test_name:
1167             bsf += u"-hw"
1168     elif u"ethip4vxlan" in test_name:
1169         domain = u"ip4_tunnels"
1170     elif u"ethip4udpgeneve" in test_name:
1171         domain = u"ip4_tunnels"
1172     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1173         domain = u"ip4"
1174     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1175         domain = u"ip6"
1176     elif u"l2xcbase" in test_name or \
1177             u"l2xcscale" in test_name or \
1178             u"l2bdbasemaclrn" in test_name or \
1179             u"l2bdscale" in test_name or \
1180             u"l2patch" in test_name:
1181         domain = u"l2"
1182     else:
1183         domain = u""
1184
1185     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1186     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1187
1188     return file_name + anchor_name
1189
1190
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the CSV dashboard produced earlier and renders it as an HTML
    table embedded in an rST ".. raw:: html" block. Rows are colored by
    the regression / progression counters in columns 5 and 6.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # MRR links to the plain trending pages; NDR/PDR to the ndrpdr pages
    # with a per-type anchor suffix.
    if test_type == u"MRR":
        lnk_dir = u"../trending/"
        lnk_sufix = u""
    else:
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = f"-{test_type.lower()}"

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    root = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header (first CSV row):
    head_row = ET.SubElement(root, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col, caption in enumerate(rows[0]):
        cell = ET.SubElement(
            head_row, u"th",
            attrib=dict(align=u"left" if col == 0 else u"center")
        )
        cell.text = caption

    # Alternating shades per row class:
    palette = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    add_links = table.get(u"add-links", True)
    for r_idx, row in enumerate(rows[1:]):
        if int(row[4]):
            shades = palette[u"regression"]
        elif int(row[5]):
            shades = palette[u"progression"]
        else:
            shades = palette[u"normal"]
        row_el = ET.SubElement(
            root, u"tr", attrib=dict(bgcolor=shades[r_idx % 2])
        )

        # Columns:
        for c_idx, value in enumerate(row):
            cell = ET.SubElement(
                row_el,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # First column optionally links to the trending plot:
            if c_idx == 0 and add_links:
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), value)}"
                        f"{lnk_sufix}"
                    )
                )
                link.text = value
            else:
                cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(root, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1307
1308
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For every selected build the output lists the build number, the
    version, the passed/failed counters and the names of the failed
    tests, one item per line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    out_lines = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            n_passed = 0
            n_failed = 0
            failed_names = list()
            for result in data[job][build].values:
                if result[u"status"] != u"FAIL":
                    n_passed += 1
                    continue
                n_failed += 1
                # The failed test is reported with its NIC prefix; tests
                # whose parent suite carries no NIC token are not listed.
                nic_match = re.search(REGEX_NIC, result[u"parent"])
                if not nic_match:
                    continue
                nic = nic_match.group(0)
                failed_names.append(f"{nic}-{result[u'name']}")
            out_lines.append(build)
            out_lines.append(version)
            out_lines.append(str(n_passed))
            out_lines.append(str(n_failed))
            out_lines.extend(failed_names)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(line + u'\n' for line in out_lines)
1369
1370
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Writes a CSV (and its pretty-txt rendering) listing, for each test that
    failed at least once within the configured time window, the number of
    failures and the details of the most recent failure.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # MRR is the default; NDRPDR is used only when the spec filter
    # mentions it. It selects the job name in the last CSV column below.
    test_type = u"MRR"
    if u"NDRPDR" in table.get(u"filter", list()):
        test_type = u"NDRPDR"

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the last "window" days (default 7)
    # contribute to the failure counts.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # The displayed name is prefixed with the NIC extracted
                    # from the parent suite name; tests without a NIC token
                    # are skipped.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    # Builds without a "generated" timestamp, or outside the
                    # time window, are ignored.
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        # Keep (status, timestamp, vpp version, csit build)
                        # for each build inside the window.
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        # Count failures; the last FAIL seen in iteration order provides
        # the "last failure" details.
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append([
                tst_data[u"name"],
                fails_nr,
                fails_last_date,
                fails_last_vpp,
                f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
                f"-build-{fails_last_csit}"
            ])

    # Sort by the date of the last failure (descending), then group by the
    # number of failures (descending); the per-group order is preserved.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1481
1482
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the failed-tests CSV and renders it as an HTML table embedded
    in an rST ".. raw:: html" block, with alternating row shading.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # MRR links to the plain trending pages; the ndrpdr variants all use
    # the "-pdr" anchor suffix.
    if test_type == u"MRR":
        lnk_dir = u"../trending/"
        lnk_sufix = u""
    else:
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = u"-pdr"

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    root = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header (first CSV row):
    head_row = ET.SubElement(root, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col, caption in enumerate(rows[0]):
        cell = ET.SubElement(
            head_row, u"th",
            attrib=dict(align=u"left" if col == 0 else u"center")
        )
        cell.text = caption

    # Rows alternate between two shades:
    shades = (u"#e9f1fb", u"#d4e4f7")
    add_links = table.get(u"add-links", True)
    for r_idx, row in enumerate(rows[1:]):
        row_el = ET.SubElement(
            root, u"tr", attrib=dict(bgcolor=shades[r_idx % 2])
        )

        # Columns:
        for c_idx, value in enumerate(row):
            cell = ET.SubElement(
                row_el,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # First column optionally links to the trending plot:
            if c_idx == 0 and add_links:
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), value)}"
                        f"{lnk_sufix}"
                    )
                )
                link.text = value
            else:
                cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(root, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1580
1581
def table_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_comparison
    specified in the specification file.

    Builds per-column throughput statistics (mean, stdev) from the filtered
    input data, optionally applies per-column data replacement, computes
    relative-change comparisons between selected columns (with optional RCA
    annotations loaded from YAML files), and writes the result as csv, txt
    and html tables.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    columns = table.get(u"columns", None)
    if not columns:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    cols = list()
    for idx, col in enumerate(columns):
        if col.get(u"data-set", None) is None:
            logging.warning(f"No data for column {col.get(u'title', u'')}")
            continue
        tag = col.get(u"tag", None)
        data = input_data.filter_data(
            table,
            params=[u"throughput", u"result", u"name", u"parent", u"tags"],
            data=col[u"data-set"],
            continue_on_error=True
        )
        col_data = {
            u"title": col.get(u"title", f"Column{idx}"),
            u"data": dict()
        }
        for builds in data.values:
            for build in builds:
                for tst_name, tst_data in build.items():
                    if tag and tag not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = \
                        _tpc_modify_test_name(tst_name, ignore_nic=True).\
                        replace(u"2n1l-", u"")
                    if col_data[u"data"].get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        col_data[u"data"][tst_name_mod] = {
                            u"name": name,
                            # "replace" marks entries eligible to be wiped by
                            # the data-replacement pass below.
                            u"replace": True,
                            u"data": list(),
                            u"mean": None,
                            u"stdev": None
                        }
                    _tpc_insert_data(
                        target=col_data[u"data"][tst_name_mod],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

        replacement = col.get(u"data-replacement", None)
        if replacement:
            rpl_data = input_data.filter_data(
                table,
                params=[u"throughput", u"result", u"name", u"parent", u"tags"],
                data=replacement,
                continue_on_error=True
            )
            for builds in rpl_data.values:
                for build in builds:
                    for tst_name, tst_data in build.items():
                        if tag and tag not in tst_data[u"tags"]:
                            continue
                        tst_name_mod = \
                            _tpc_modify_test_name(tst_name, ignore_nic=True).\
                            replace(u"2n1l-", u"")
                        if col_data[u"data"].get(tst_name_mod, None) is None:
                            name = tst_data[u'name'].rsplit(u'-', 1)[0]
                            if u"across testbeds" in table[u"title"].lower() \
                                    or u"across topologies" in \
                                    table[u"title"].lower():
                                name = _tpc_modify_displayed_test_name(name)
                            col_data[u"data"][tst_name_mod] = {
                                u"name": name,
                                u"replace": False,
                                u"data": list(),
                                u"mean": None,
                                u"stdev": None
                            }
                        # First replacement hit discards the originally
                        # collected samples for this test.
                        if col_data[u"data"][tst_name_mod][u"replace"]:
                            col_data[u"data"][tst_name_mod][u"replace"] = False
                            col_data[u"data"][tst_name_mod][u"data"] = list()
                        _tpc_insert_data(
                            target=col_data[u"data"][tst_name_mod],
                            src=tst_data,
                            include_tests=table[u"include-tests"]
                        )

        if table[u"include-tests"] in (u"NDR", u"PDR"):
            for tst_name, tst_data in col_data[u"data"].items():
                if tst_data[u"data"]:
                    tst_data[u"mean"] = mean(tst_data[u"data"])
                    tst_data[u"stdev"] = stdev(tst_data[u"data"])

        cols.append(col_data)

    tbl_dict = dict()
    for col in cols:
        for tst_name, tst_data in col[u"data"].items():
            if tbl_dict.get(tst_name, None) is None:
                tbl_dict[tst_name] = {
                    "name": tst_data[u"name"]
                }
            tbl_dict[tst_name][col[u"title"]] = {
                u"mean": tst_data[u"mean"],
                u"stdev": tst_data[u"stdev"]
            }

    if not tbl_dict:
        logging.warning(f"No data for table {table.get(u'title', u'')}!")
        return

    tbl_lst = list()
    for tst_data in tbl_dict.values():
        row = [tst_data[u"name"], ]
        for col in cols:
            row.append(tst_data.get(col[u"title"], None))
        tbl_lst.append(row)

    comparisons = table.get(u"comparisons", None)
    rcas = list()
    if comparisons and isinstance(comparisons, list):
        # Validate comparisons into a new list instead of popping from the
        # list being enumerated: pop(idx) while iterating skips the element
        # following each removed one and desynchronizes rcas (which must stay
        # index-aligned with comparisons for rcas[idx - len(cols)] below).
        valid_comparisons = list()
        for comp in comparisons:
            try:
                col_ref = int(comp[u"reference"])
                col_cmp = int(comp[u"compare"])
            except KeyError:
                logging.warning(u"Comparison: No references defined! Skipping.")
                continue
            if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
                    col_ref == col_cmp):
                logging.warning(f"Wrong values of reference={col_ref} "
                                f"and/or compare={col_cmp}. Skipping.")
                continue
            valid_comparisons.append(comp)
            rca_file_name = comp.get(u"rca-file", None)
            if rca_file_name:
                try:
                    with open(rca_file_name, u"r") as file_handler:
                        rcas.append(
                            {
                                u"title": f"RCA{len(rcas) + 1}",
                                u"data": load(file_handler, Loader=FullLoader)
                            }
                        )
                except (YAMLError, IOError) as err:
                    logging.warning(
                        f"The RCA file {rca_file_name} does not exist or "
                        f"it is corrupted!"
                    )
                    logging.debug(repr(err))
                    rcas.append(None)
            else:
                rcas.append(None)
        comparisons = valid_comparisons
    else:
        # Normalize to an empty list so the header/extension loops below do
        # not raise TypeError when no comparisons are specified.
        comparisons = list()

    tbl_cmp_lst = list()
    if comparisons:
        for row in tbl_lst:
            new_row = deepcopy(row)
            for comp in comparisons:
                ref_itm = row[int(comp[u"reference"])]
                if ref_itm is None and \
                        comp.get(u"reference-alt", None) is not None:
                    ref_itm = row[int(comp[u"reference-alt"])]
                cmp_itm = row[int(comp[u"compare"])]
                if ref_itm is not None and cmp_itm is not None and \
                        ref_itm[u"mean"] is not None and \
                        cmp_itm[u"mean"] is not None and \
                        ref_itm[u"stdev"] is not None and \
                        cmp_itm[u"stdev"] is not None:
                    delta, d_stdev = relative_change_stdev(
                        ref_itm[u"mean"], cmp_itm[u"mean"],
                        ref_itm[u"stdev"], cmp_itm[u"stdev"]
                    )
                    if delta is None:
                        break
                    new_row.append({
                        u"mean": delta * 1e6,
                        u"stdev": d_stdev * 1e6
                    })
                else:
                    break
            else:
                # Only rows with a value for every comparison are kept.
                tbl_cmp_lst.append(new_row)

    try:
        # Stable double sort: by name ascending, then by last diff descending.
        tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
        tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
    except TypeError as err:
        logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")

    tbl_for_csv = list()
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or\
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                row.append(u"NT")
                row.append(u"NT")
            else:
                row.append(round(float(itm[u'mean']) / 1e6, 3))
                row.append(round(float(itm[u'stdev']) / 1e6, 3))
        for rca in rcas:
            if rca is None:
                continue
            rca_nr = rca[u"data"].get(row[0], u"-")
            row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        tbl_for_csv.append(row)

    header_csv = [u"Test Case", ]
    for col in cols:
        header_csv.append(f"Avg({col[u'title']})")
        header_csv.append(f"Stdev({col[u'title']})")
    for comp in comparisons:
        header_csv.append(
            f"Avg({comp.get(u'title', u'')})"
        )
        header_csv.append(
            f"Stdev({comp.get(u'title', u'')})"
        )
    for rca in rcas:
        if rca:
            header_csv.append(rca[u"title"])

    legend_lst = table.get(u"legend", None)
    if legend_lst is None:
        legend = u""
    else:
        legend = u"\n" + u"\n".join(legend_lst) + u"\n"

    footnote = u""
    if rcas and any(rcas):
        footnote += u"\nRoot Cause Analysis:\n"
        for rca in rcas:
            if rca:
                footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"

    csv_file_name = f"{table[u'output-file']}-csv.csv"
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(
            u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
        )
        for test in tbl_for_csv:
            file_handler.write(
                u",".join([f'"{item}"' for item in test]) + u"\n"
            )
        if legend_lst:
            for item in legend_lst:
                file_handler.write(f'"{item}"\n')
        if footnote:
            for itm in footnote.split(u"\n"):
                file_handler.write(f'"{itm}"\n')

    tbl_tmp = list()
    # Guard against an empty comparison table; the original indexed
    # tbl_cmp_lst[0] unconditionally and raised IndexError.
    max_lens = [0, ] * len(tbl_cmp_lst[0]) if tbl_cmp_lst else list()
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or \
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                new_itm = u"NT"
            else:
                # Columns hold plain values; diff columns get a '+' sign.
                if idx < len(cols):
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 1)} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
                        replace(u"nan", u"NaN")
                    )
                else:
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 1):+} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
                        replace(u"nan", u"NaN")
                    )
            if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
                max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
            row.append(new_itm)

        tbl_tmp.append(row)

    header = [u"Test Case", ]
    header.extend([col[u"title"] for col in cols])
    header.extend([comp.get(u"title", u"") for comp in comparisons])

    tbl_final = list()
    for line in tbl_tmp:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm in (u"NT", u"NaN"):
                row.append(itm)
                continue
            # Right-align the stdev part of "mean ±stdev" per column.
            itm_lst = itm.rsplit(u"\u00B1", 1)
            itm_lst[-1] = \
                f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
            itm_str = u"\u00B1".join(itm_lst)

            if idx >= len(cols):
                # Diffs
                rca = rcas[idx - len(cols)]
                if rca:
                    # Add rcas to diffs
                    rca_nr = rca[u"data"].get(row[0], None)
                    if rca_nr:
                        hdr_len = len(header[idx + 1]) - 1
                        if hdr_len < 19:
                            hdr_len = 19
                        rca_nr = f"[{rca_nr}]"
                        itm_str = (
                            f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
                            f"{u' ' * (hdr_len - 4 - len(itm_str))}"
                            f"{itm_str}"
                        )
            row.append(itm_str)
        tbl_final.append(row)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(u";".join(header) + u"\n")
        for test in tbl_final:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    # Generate txt table:
    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")

    with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
        file_handler.write(legend)
        file_handler.write(footnote)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_final,
        table[u'output-file'],
        legend=legend,
        footnote=footnote,
        sort_data=False,
        title=table.get(u"title", u"")
    )
1945
1946
def table_weekly_comparison(table, in_data):
    """Generate the table(s) with algorithm: table_weekly_comparison
    specified in the specification file.

    Collects per-build NDR/PDR lower-bound throughput for up to
    ``nr-of-data-columns`` builds, computes relative changes between
    configured reference/compare columns, and writes csv, txt and html
    tables.

    :param table: Table to generate.
    :param in_data: Data to process.
    :type table: pandas.Series
    :type in_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    incl_tests = table.get(u"include-tests", None)
    if incl_tests not in (u"NDR", u"PDR"):
        logging.error(f"Wrong tests to include specified ({incl_tests}).")
        return

    nr_cols = table.get(u"nr-of-data-columns", None)
    if not nr_cols or nr_cols < 2:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    data = in_data.filter_data(
        table,
        params=[u"throughput", u"result", u"name", u"parent", u"tags"],
        continue_on_error=True
    )

    # Four header rows (one physical csv line each). Build metadata is
    # inserted at position 1 below, so later builds end up closer to the
    # row label, i.e. columns are ordered newest-first.
    header = [
        [u"VPP Version", ],
        [u"Start Timestamp", ],
        [u"CSIT Build", ],
        [u"CSIT Testbed", ]
    ]
    tbl_dict = dict()
    # idx counts accepted (non-empty) builds, capped at nr_cols.
    idx = 0
    # Optional mapping of testbed IP -> human-readable testbed name.
    tb_tbl = table.get(u"testbeds", None)
    for job_name, job_data in data.items():
        for build_nr, build in job_data.items():
            if idx >= nr_cols:
                break
            if build.empty:
                continue

            tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            else:
                testbed = u""
            header[2].insert(1, build_nr)
            header[3].insert(1, testbed)
            header[1].insert(
                1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
            )
            header[0].insert(
                1, in_data.metadata(job_name, build_nr).get(u"version", u"")
            )

            for tst_name, tst_data in build.items():
                tst_name_mod = \
                    _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
                if not tbl_dict.get(tst_name_mod, None):
                    tbl_dict[tst_name_mod] = dict(
                        name=tst_data[u'name'].rsplit(u'-', 1)[0],
                    )
                try:
                    # Data columns are keyed by negative build index
                    # (-1 for the first accepted build, -2 for the next, ...);
                    # the rendering loop below reads them back the same way.
                    tbl_dict[tst_name_mod][-idx - 1] = \
                        tst_data[u"throughput"][incl_tests][u"LOWER"]
                except (TypeError, IndexError, KeyError, ValueError):
                    # Throughput entry may be missing or malformed for this
                    # test; leave the cell absent (rendered as None/"-").
                    pass
            idx += 1

    if idx < nr_cols:
        logging.error(u"Not enough data to build the table! Skipping")
        return

    cmp_dict = dict()
    for idx, cmp in enumerate(table.get(u"comparisons", list())):
        idx_ref = cmp.get(u"reference", None)
        idx_cmp = cmp.get(u"compare", None)
        if idx_ref is None or idx_cmp is None:
            continue
        # NOTE(review): the "idx_ref - idx" offset presumably compensates for
        # the Diff(...) labels appended to header[0] by previous iterations
        # of this loop -- confirm against the specification files.
        header[0].append(
            f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
            f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
        )
        header[1].append(u"")
        header[2].append(u"")
        header[3].append(u"")
        for tst_name, tst_data in tbl_dict.items():
            if not cmp_dict.get(tst_name, None):
                cmp_dict[tst_name] = list()
            ref_data = tst_data.get(idx_ref, None)
            cmp_data = tst_data.get(idx_cmp, None)
            if ref_data is None or cmp_data is None:
                # Missing data yields NaN so the row sorts to the bottom.
                cmp_dict[tst_name].append(float(u'nan'))
            else:
                cmp_dict[tst_name].append(
                    relative_change(ref_data, cmp_data)
                )

    # Rows with no usable comparison result go to tbl_lst_none and are
    # appended after the sorted data rows.
    tbl_lst_none = list()
    tbl_lst = list()
    for tst_name, tst_data in tbl_dict.items():
        itm_lst = [tst_data[u"name"], ]
        for idx in range(nr_cols):
            item = tst_data.get(-idx - 1, None)
            # insert(1, ...) reverses the retrieval order so data cells line
            # up with the newest-first header columns built above.
            if item is None:
                itm_lst.insert(1, None)
            else:
                # Throughput is reported in Mpps (value / 1e6, one decimal).
                itm_lst.insert(1, round(item / 1e6, 1))
        itm_lst.extend(
            [
                None if itm is None else round(itm, 1)
                for itm in cmp_dict[tst_name]
            ]
        )
        if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
            tbl_lst_none.append(itm_lst)
        else:
            tbl_lst.append(itm_lst)

    # Stable sort: by name, then by last comparison value ascending.
    tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_lst.extend(tbl_lst_none)

    # Generate csv table:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        for hdr in header:
            file_handler.write(u",".join(hdr) + u"\n")
        for test in tbl_lst:
            # Normalize all "no data" spellings to "-" in the csv output.
            file_handler.write(u",".join(
                [
                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
                    replace(u"null", u"-") for item in test
                ]
            ) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")

    # Reorganize header in txt table
    txt_table = list()
    with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
        for line in list(file_handler):
            txt_table.append(line)
    try:
        # NOTE(review): moves line 2 to position 5 -- presumably relocating a
        # header row relative to the separator lines emitted by
        # convert_csv_to_pretty_txt; confirm against its output format.
        txt_table.insert(5, txt_table.pop(2))
        with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
            file_handler.writelines(txt_table)
    except IndexError:
        # Table shorter than expected; leave the txt file as generated.
        pass

    # Generate html table:
    # Collapse the four header rows into one multi-line html header cell
    # per column.
    hdr_html = [
        u"<br>".join(row) for row in zip(*header)
    ]
    _tpc_generate_html_table(
        hdr_html,
        tbl_lst,
        table[u'output-file'],
        sort_data=True,
        title=table.get(u"title", u""),
        generate_rst=False
    )