PAL: Integrate new comp tables
[csit.git] / resources/tools/presentation/generator_tables.py
# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Algorithms to generate tables.
"""


import logging
import csv
import re

from collections import OrderedDict
from xml.etree import ElementTree as ET
from datetime import datetime as dt
from datetime import timedelta
from copy import deepcopy

import plotly.graph_objects as go
import plotly.offline as ploff
import pandas as pd

from numpy import nan, isnan
from yaml import load, FullLoader, YAMLError

from pal_utils import mean, stdev, classify_anomalies, \
    convert_csv_to_pretty_txt, relative_change_stdev


REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
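# Illustrative examples of NIC name tokens matched by REGEX_NIC:
# u"10ge2p1x710", u"25ge2p1xxv710", u"40ge2p1vic1385".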


def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison
    }
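    # A table entry in the specification file selects one of the generators
    # above via its u"algorithm" key.  Illustrative, hypothetical sketch of
    # such an entry (YAML):
    #
    #   - type: "table"
    #     title: "Performance comparison"
    #     algorithm: "table_perf_comparison"
    #     output-file: "performance-changes"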

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            generator[table[u"algorithm"]](table, data)
        except NameError as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")


def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: table_oper_data_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")


def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

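    # Each column specification is expected to carry a u"data" field of the
    # form u"data <key>" (e.g. u"data msg"; illustrative), where <key> selects
    # the test attribute written to that column below.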
    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u" |br| ", 1)[1]
                            except IndexError:
                                pass
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")


def _tpc_modify_test_name(test_name, ignore_nic=False):
    """Modify a test name by replacing its parts.

    :param test_name: Test name to be modified.
    :param ignore_nic: If True, NIC is removed from TC name.
    :type test_name: str
    :type ignore_nic: bool
    :returns: Modified test name.
    :rtype: str
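
    Example (illustrative; the test name is hypothetical but follows the CSIT
    naming scheme)::

        >>> _tpc_modify_test_name(u"10ge2p1x710-ethip4-ip4base-2t1c-ndrpdr")
        '10ge2p1x710-ethip4-ip4base-1c'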
    """
    test_name_mod = test_name.\
        replace(u"-ndrpdrdisc", u""). \
        replace(u"-ndrpdr", u"").\
        replace(u"-pdrdisc", u""). \
        replace(u"-ndrdisc", u"").\
        replace(u"-pdr", u""). \
        replace(u"-ndr", u""). \
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c"). \
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c"). \
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")

    if ignore_nic:
        return re.sub(REGEX_NIC, u"", test_name_mod)
    return test_name_mod


def _tpc_modify_displayed_test_name(test_name):
    """Modify a test name which is displayed in a table by replacing its parts.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    return test_name.\
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c"). \
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c"). \
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")


def _tpc_insert_data(target, src, include_tests):
    """Insert src data into the target structure.

    :param target: Target structure where the data is placed.
    :param src: Source data to be placed into the target structure.
    :param include_tests: Which results are included (MRR, NDR, PDR).
    :type target: list
    :type src: dict
    :type include_tests: str
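
    Example (illustrative, assuming a minimal src structure for NDR)::

        >>> tgt = list()
        >>> _tpc_insert_data(
        ...     tgt, {u"throughput": {u"NDR": {u"LOWER": 12345678.0}}}, u"NDR"
        ... )
        >>> tgt
        [12345678.0]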
    """
    try:
        if include_tests == u"MRR":
            target.append(
                (
                    src[u"result"][u"receive-rate"],
                    src[u"result"][u"receive-stdev"]
                )
            )
        elif include_tests == u"PDR":
            target.append(src[u"throughput"][u"PDR"][u"LOWER"])
        elif include_tests == u"NDR":
            target.append(src[u"throughput"][u"NDR"][u"LOWER"])
    except (KeyError, TypeError):
        pass


def _tpc_sort_table(table):
    """Sort the table this way:

    1. Put rows with "See footnote" at the first place.
    2. Sort the rest by "Delta" in descending order.

    Rows flagged "New in CSIT-XXXX" are collected but currently excluded from
    the output (see the commented-out extend below).

    :param table: Table to sort.
    :type table: list
    :returns: Sorted table.
    :rtype: list
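
    Example (illustrative)::

        >>> _tpc_sort_table([
        ...     [u"test-a", 1.0, 1.2, 20, 2],
        ...     [u"test-b", 1.0, 1.1, 5, 1],
        ...     [u"test-c", u"NT", 1.0, u"NT", u"New in CSIT-2001"]
        ... ])
        [['test-a', 1.0, 1.2, 20, 2], ['test-b', 1.0, 1.1, 5, 1]]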
    """

    tbl_new = list()
    tbl_see = list()
    tbl_delta = list()
    for item in table:
        if isinstance(item[-1], str):
            if u"New in CSIT" in item[-1]:
                tbl_new.append(item)
            elif u"See footnote" in item[-1]:
                tbl_see.append(item)
        else:
            tbl_delta.append(item)

    # Sort the tables:
    tbl_new.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
    tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
    tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)

    # Put the tables together:
    table = list()
    # We do not want "New in CSIT":
    # table.extend(tbl_new)
    table.extend(tbl_see)
    table.extend(tbl_delta)

    return table


def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True):
    """Generate an HTML table from the input data, with a simple sorting option.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True, the data sorting is enabled.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
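
    Example call (illustrative only, the file name is hypothetical; not a
    doctest because the function writes files to disk)::

        _tpc_generate_html_table(
            header=[u"Test Case", u"Delta [%]"],
            data=[[u"ethip4-ip4base", 2.5], [u"l2patch", -1.3]],
            out_file_name=u"_build/vpp_comparison"
        )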
    """

    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    params = {
        u"align-hdr": (
            [u"left", u"center"],
            [u"left", u"left", u"center"],
            [u"left", u"left", u"left", u"center"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([28, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx]
    )

    fig = go.Figure()

    if sort_data:
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx]
                    )
                )
            )

        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
        menu_items.extend(menu_items_rev)
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.045,
                    yanchor=u"top",
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx]
                )
            )
        )

    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )
        if legend:
            rst_file.write(legend[1:].replace(u"\n", u" |br| "))
        if footnote:
            rst_file.write(footnote.replace(u"\n", u" |br| ")[1:])


def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
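
    Illustrative (hypothetical) sketch of the specification keys read below::

        reference:
          title: "Release A"
          data: {"a-performance-job": [1, 2, 3]}
        compare:
          title: "Release B"
          data: {"a-performance-job": [4, 5, 6]}
        include-tests: "NDR"   # or "PDR" / "MRR"
        output-file: "performance-changes"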
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test Case", ]
        legend = u"\nLegend:\n"

        rca_data = None
        rca = table.get(u"rca", None)
        if rca:
            try:
                with open(rca.get(u"data-file", u""), u"r") as rca_file:
                    rca_data = load(rca_file, Loader=FullLoader)
                header.insert(0, rca.get(u"title", u"RCA"))
                legend += (
                    u"RCA: Reference to the Root Cause Analysis, see below.\n"
                )
            except (YAMLError, IOError) as err:
                logging.warning(repr(err))

        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} Avg({table[u'include-tests']})",
                    f"{item[u'title']} Stdev({table[u'include-tests']})"
                ]
            )
            legend += (
                f"{item[u'title']} Avg({table[u'include-tests']}): "
                f"Mean value of {table[u'include-tests']} [Mpps] computed from "
                f"a series of runs of the listed tests executed against "
                f"{item[u'title']}.\n"
                f"{item[u'title']} Stdev({table[u'include-tests']}): "
                f"Standard deviation value of {table[u'include-tests']} [Mpps] "
                f"computed from a series of runs of the listed tests executed "
                f"against {item[u'title']}.\n"
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} "
                f"Avg({table[u'include-tests']})",
                f"{table[u'reference'][u'title']} "
                f"Stdev({table[u'include-tests']})",
                f"{table[u'compare'][u'title']} "
                f"Avg({table[u'include-tests']})",
                f"{table[u'compare'][u'title']} "
                f"Stdev({table[u'include-tests']})",
                f"Diff({table[u'reference'][u'title']},"
                f"{table[u'compare'][u'title']})",
                u"Stdev(Diff)"
            ]
        )
        header_str = u";".join(header) + u"\n"
        legend += (
            f"{table[u'reference'][u'title']} "
            f"Avg({table[u'include-tests']}): "
            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
            f"series of runs of the listed tests executed against "
            f"{table[u'reference'][u'title']}.\n"
            f"{table[u'reference'][u'title']} "
            f"Stdev({table[u'include-tests']}): "
            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
            f"computed from a series of runs of the listed tests executed "
            f"against {table[u'reference'][u'title']}.\n"
            f"{table[u'compare'][u'title']} "
            f"Avg({table[u'include-tests']}): "
            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
            f"series of runs of the listed tests executed against "
            f"{table[u'compare'][u'title']}.\n"
            f"{table[u'compare'][u'title']} "
            f"Stdev({table[u'include-tests']}): "
            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
            f"computed from a series of runs of the listed tests executed "
            f"against {table[u'compare'][u'title']}.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Stdev(Diff): "
            u"Standard deviation of percentage change calculated for mean "
            u"values.\n"
            u"NT: Not Tested\n"
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = tst_data[u'name'].rsplit(u'-', 1)[0]
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"replace-ref": True,
                        u"replace-cmp": True,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 src=tst_data,
                                 include_tests=table[u"include-tests"])

    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"replace-ref": False,
                            u"replace-cmp": True,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
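                    # First replacement sample for this test: drop the data
                    # collected from the original reference builds so the
                    # replacement builds fully override it.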
                    if tbl_dict[tst_name_mod][u"replace-ref"]:
                        tbl_dict[tst_name_mod][u"replace-ref"] = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = tst_data[u'name'].rsplit(u'-', 1)[0]
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"replace-ref": False,
                        u"replace-cmp": True,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"replace-ref": False,
                            u"replace-cmp": False,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if tbl_dict[tst_name_mod][u"replace-cmp"]:
                        tbl_dict[tst_name_mod][u"replace-cmp"] = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = (tst_data[u"result"][u"receive-rate"],
                                   tst_data[u"result"][u"receive-stdev"])
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        pass

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        if table[u"include-tests"] == u"MRR":
                            item.append(round(hist_data[0][0] / 1e6, 1))
                            item.append(round(hist_data[0][1] / 1e6, 1))
                        else:
                            item.append(round(mean(hist_data) / 1e6, 1))
                            item.append(round(stdev(hist_data) / 1e6, 1))
                    else:
                        item.extend([u"NT", u"NT"])
            else:
                item.extend([u"NT", u"NT"])
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([u"NT", u"NT"])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([u"NT", u"NT"])
        if item[-2] == u"NT":
            pass
        elif item[-4] == u"NT":
            item.append(u"New in CSIT-2001")
            item.append(u"New in CSIT-2001")
        elif data_r_mean is not None and data_c_mean is not None:
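            # Percentage change of the mean value (compare vs reference) and
            # the standard deviation of that change, as computed by
            # pal_utils.relative_change_stdev().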
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            )
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
        if rca_data:
            rca_nr = rca_data.get(item[0], u"-")
            item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        if (len(item) == len(header)) and (item[-4] != u"NT"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")

    footnote = u""
    with open(txt_file_name, u'a') as txt_file:
        txt_file.write(legend)
        if rca_data:
            footnote = rca_data.get(u"footnote", u"")
            if footnote:
                txt_file.write(footnote)
        txt_file.write(u":END")

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_lst,
        table[u'output-file'],
        legend=legend,
        footnote=footnote
    )


def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison_nic
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test Case", ]
        legend = u"\nLegend:\n"

        rca_data = None
        rca = table.get(u"rca", None)
        if rca:
            try:
                with open(rca.get(u"data-file", u""), u"r") as rca_file:
                    rca_data = load(rca_file, Loader=FullLoader)
                header.insert(0, rca.get(u"title", u"RCA"))
                legend += (
                    u"RCA: Reference to the Root Cause Analysis, see below.\n"
                )
            except (YAMLError, IOError) as err:
                logging.warning(repr(err))

        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} Avg({table[u'include-tests']})",
                    f"{item[u'title']} Stdev({table[u'include-tests']})"
                ]
            )
            legend += (
                f"{item[u'title']} Avg({table[u'include-tests']}): "
                f"Mean value of {table[u'include-tests']} [Mpps] computed from "
                f"a series of runs of the listed tests executed against "
                f"{item[u'title']}.\n"
                f"{item[u'title']} Stdev({table[u'include-tests']}): "
                f"Standard deviation value of {table[u'include-tests']} [Mpps] "
                f"computed from a series of runs of the listed tests executed "
                f"against {item[u'title']}.\n"
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} "
                f"Avg({table[u'include-tests']})",
                f"{table[u'reference'][u'title']} "
                f"Stdev({table[u'include-tests']})",
                f"{table[u'compare'][u'title']} "
                f"Avg({table[u'include-tests']})",
                f"{table[u'compare'][u'title']} "
                f"Stdev({table[u'include-tests']})",
                f"Diff({table[u'reference'][u'title']},"
                f"{table[u'compare'][u'title']})",
                u"Stdev(Diff)"
            ]
        )
        header_str = u";".join(header) + u"\n"
        legend += (
            f"{table[u'reference'][u'title']} "
            f"Avg({table[u'include-tests']}): "
            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
            f"series of runs of the listed tests executed against "
            f"{table[u'reference'][u'title']}.\n"
            f"{table[u'reference'][u'title']} "
            f"Stdev({table[u'include-tests']}): "
            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
            f"computed from a series of runs of the listed tests executed "
            f"against {table[u'reference'][u'title']}.\n"
            f"{table[u'compare'][u'title']} "
            f"Avg({table[u'include-tests']}): "
            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
            f"series of runs of the listed tests executed against "
            f"{table[u'compare'][u'title']}.\n"
            f"{table[u'compare'][u'title']} "
            f"Stdev({table[u'include-tests']}): "
            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
            f"computed from a series of runs of the listed tests executed "
            f"against {table[u'compare'][u'title']}.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Stdev(Diff): "
            u"Standard deviation of percentage change calculated for mean "
            u"values.\n"
            u"NT: Not Tested\n"
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
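                # Keep only tests executed on the NIC configured for the
                # reference data set; the u"nic" value is matched against the
                # test tags (e.g. a tag such as u"NIC_Intel-X710";
                # illustrative).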
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = tst_data[u'name'].rsplit(u'-', 1)[0]
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"replace-ref": True,
                        u"replace-cmp": True,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"replace-ref": False,
                            u"replace-cmp": True,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if tbl_dict[tst_name_mod][u"replace-ref"]:
                        tbl_dict[tst_name_mod][u"replace-ref"] = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = tst_data[u'name'].rsplit(u'-', 1)[0]
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"replace-ref": False,
                        u"replace-cmp": True,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"replace-ref": False,
                            u"replace-cmp": False,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if tbl_dict[tst_name_mod][u"replace-cmp"]:
                        tbl_dict[tst_name_mod][u"replace-cmp"] = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = (tst_data[u"result"][u"receive-rate"],
                                   tst_data[u"result"][u"receive-stdev"])
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        pass

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        if table[u"include-tests"] == u"MRR":
                            item.append(round(hist_data[0][0] / 1e6, 1))
                            item.append(round(hist_data[0][1] / 1e6, 1))
                        else:
                            item.append(round(mean(hist_data) / 1e6, 1))
                            item.append(round(stdev(hist_data) / 1e6, 1))
                    else:
                        item.extend([u"NT", u"NT"])
            else:
                item.extend([u"NT", u"NT"])
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([u"NT", u"NT"])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([u"NT", u"NT"])
        if item[-2] == u"NT":
            pass
        elif item[-4] == u"NT":
            item.append(u"New in CSIT-2001")
            item.append(u"New in CSIT-2001")
        elif data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            )
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
        if rca_data:
            rca_nr = rca_data.get(item[0], u"-")
            item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        if (len(item) == len(header)) and (item[-4] != u"NT"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")

    footnote = u""
    with open(txt_file_name, u'a') as txt_file:
1349         txt_file.write(legend)
1350         if rca_data:
1351             footnote = rca_data.get(u"footnote", u"")
1352             if footnote:
1353                 txt_file.write(footnote)
1354         txt_file.write(u":END")
1355
1356     # Generate html table:
1357     _tpc_generate_html_table(
1358         header,
1359         tbl_lst,
1360         table[u'output-file'],
1361         legend=legend,
1362         footnote=footnote
1363     )
1364
1365
1366 def table_nics_comparison(table, input_data):
1367     """Generate the table(s) with algorithm: table_nics_comparison
1368     specified in the specification file.
1369
1370     :param table: Table to generate.
1371     :param input_data: Data to process.
1372     :type table: pandas.Series
1373     :type input_data: InputData
1374     """
1375
1376     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1377
1378     # Transform the data
1379     logging.info(
1380         f"    Creating the data set for the {table.get(u'type', u'')} "
1381         f"{table.get(u'title', u'')}."
1382     )
1383     data = input_data.filter_data(table, continue_on_error=True)
1384
1385     # Prepare the header of the tables
1386     try:
1387         header = [
1388             u"Test Case",
1389             f"{table[u'reference'][u'title']} "
1390             f"Avg({table[u'include-tests']})",
1391             f"{table[u'reference'][u'title']} "
1392             f"Stdev({table[u'include-tests']})",
1393             f"{table[u'compare'][u'title']} "
1394             f"Avg({table[u'include-tests']})",
1395             f"{table[u'compare'][u'title']} "
1396             f"Stdev({table[u'include-tests']})",
1397             f"Diff({table[u'reference'][u'title']},"
1398             f"{table[u'compare'][u'title']})",
1399             u"Stdev(Diff)"
1400         ]
1401         legend = (
1402             u"\nLegend:\n"
1403             f"{table[u'reference'][u'title']} "
1404             f"Avg({table[u'include-tests']}): "
1405             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1406             f"series of runs of the listed tests executed using "
1407             f"{table[u'reference'][u'title']} NIC.\n"
1408             f"{table[u'reference'][u'title']} "
1409             f"Stdev({table[u'include-tests']}): "
1410             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1411             f"computed from a series of runs of the listed tests executed "
1412             f"using {table[u'reference'][u'title']} NIC.\n"
1413             f"{table[u'compare'][u'title']} "
1414             f"Avg({table[u'include-tests']}): "
1415             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1416             f"series of runs of the listed tests executed using "
1417             f"{table[u'compare'][u'title']} NIC.\n"
1418             f"{table[u'compare'][u'title']} "
1419             f"Stdev({table[u'include-tests']}): "
1420             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1421             f"computed from a series of runs of the listed tests executed "
1422             f"using {table[u'compare'][u'title']} NIC.\n"
1423             f"Diff({table[u'reference'][u'title']},"
1424             f"{table[u'compare'][u'title']}): "
1425             f"Percentage change calculated for mean values.\n"
1426             u"Stdev(Diff): "
1427             u"Standard deviation of percentage change calculated for mean "
1428             u"values.\n"
1429             u":END"
1430         )
1431
1432     except (AttributeError, KeyError) as err:
1433         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1434         return
1435
1436     # Prepare data to the table:
1437     tbl_dict = dict()
1438     for job, builds in table[u"data"].items():
1439         for build in builds:
1440             for tst_name, tst_data in data[job][str(build)].items():
1441                 tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
1442                 if tbl_dict.get(tst_name_mod, None) is None:
1443                     name = tst_data[u'name'].rsplit(u'-', 1)[0]
1444                     tbl_dict[tst_name_mod] = {
1445                         u"name": name,
1446                         u"ref-data": list(),
1447                         u"cmp-data": list()
1448                     }
1449                 try:
1450                     if table[u"include-tests"] == u"MRR":
1451                         result = (tst_data[u"result"][u"receive-rate"],
1452                                   tst_data[u"result"][u"receive-stdev"])
1453                     elif table[u"include-tests"] == u"PDR":
1454                         result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1455                     elif table[u"include-tests"] == u"NDR":
1456                         result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1457                     else:
1458                         continue
1459
1460                     if result and \
1461                             table[u"reference"][u"nic"] in tst_data[u"tags"]:
1462                         tbl_dict[tst_name_mod][u"ref-data"].append(result)
1463                     elif result and \
1464                             table[u"compare"][u"nic"] in tst_data[u"tags"]:
1465                         tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1466                 except (TypeError, KeyError) as err:
1467                     logging.debug(f"No data for {tst_name}\n{repr(err)}")
1468                     # No data in output.xml for this test
1469
1470     tbl_lst = list()
1471     for tst_name in tbl_dict:
1472         item = [tbl_dict[tst_name][u"name"], ]
1473         data_r = tbl_dict[tst_name][u"ref-data"]
1474         if data_r:
1475             if table[u"include-tests"] == u"MRR":
1476                 data_r_mean = data_r[0][0]
1477                 data_r_stdev = data_r[0][1]
1478             else:
1479                 data_r_mean = mean(data_r)
1480                 data_r_stdev = stdev(data_r)
1481             item.append(round(data_r_mean / 1e6, 1))
1482             item.append(round(data_r_stdev / 1e6, 1))
1483         else:
1484             data_r_mean = None
1485             data_r_stdev = None
1486             item.extend([None, None])
1487         data_c = tbl_dict[tst_name][u"cmp-data"]
1488         if data_c:
1489             if table[u"include-tests"] == u"MRR":
1490                 data_c_mean = data_c[0][0]
1491                 data_c_stdev = data_c[0][1]
1492             else:
1493                 data_c_mean = mean(data_c)
1494                 data_c_stdev = stdev(data_c)
1495             item.append(round(data_c_mean / 1e6, 1))
1496             item.append(round(data_c_stdev / 1e6, 1))
1497         else:
1498             data_c_mean = None
1499             data_c_stdev = None
1500             item.extend([None, None])
1501         if data_r_mean is not None and data_c_mean is not None:
1502             delta, d_stdev = relative_change_stdev(
1503                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1504             )
1505             try:
1506                 item.append(round(delta))
1507             except ValueError:
1508                 item.append(delta)
1509             try:
1510                 item.append(round(d_stdev))
1511             except ValueError:
1512                 item.append(d_stdev)
1513             tbl_lst.append(item)
1514
1515     # Sort the table according to the relative change
1516     tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)
1517
1518     # Generate csv tables:
1519     with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1520         file_handler.write(u";".join(header) + u"\n")
1521         for test in tbl_lst:
1522             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1523
1524     convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1525                               f"{table[u'output-file']}.txt",
1526                               delimiter=u";")
1527
1528     with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1529         txt_file.write(legend)
1530
1531     # Generate html table:
1532     _tpc_generate_html_table(
1533         header,
1534         tbl_lst,
1535         table[u'output-file'],
1536         legend=legend
1537     )
1538
1539
1540 def table_soak_vs_ndr(table, input_data):
1541     """Generate the table(s) with algorithm: table_soak_vs_ndr
1542     specified in the specification file.
1543
1544     :param table: Table to generate.
1545     :param input_data: Data to process.
1546     :type table: pandas.Series
1547     :type input_data: InputData
1548     """
1549
1550     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1551
1552     # Transform the data
1553     logging.info(
1554         f"    Creating the data set for the {table.get(u'type', u'')} "
1555         f"{table.get(u'title', u'')}."
1556     )
1557     data = input_data.filter_data(table, continue_on_error=True)
1558
1559     # Prepare the header of the table
1560     try:
1561         header = [
1562             u"Test Case",
1563             f"Avg({table[u'reference'][u'title']})",
1564             f"Stdev({table[u'reference'][u'title']})",
1565             f"Avg({table[u'compare'][u'title']})",
1566             f"Stdev{table[u'compare'][u'title']})",
1567             u"Diff",
1568             u"Stdev(Diff)"
1569         ]
1570         header_str = u";".join(header) + u"\n"
1571         legend = (
1572             u"\nLegend:\n"
1573             f"Avg({table[u'reference'][u'title']}): "
1574             f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
1575             f"from a series of runs of the listed tests.\n"
1576             f"Stdev({table[u'reference'][u'title']}): "
1577             f"Standard deviation value of {table[u'reference'][u'title']} "
1578             f"[Mpps] computed from a series of runs of the listed tests.\n"
1579             f"Avg({table[u'compare'][u'title']}): "
1580             f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
1581             f"a series of runs of the listed tests.\n"
1582             f"Stdev({table[u'compare'][u'title']}): "
1583             f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
1584             f"computed from a series of runs of the listed tests.\n"
1585             f"Diff({table[u'reference'][u'title']},"
1586             f"{table[u'compare'][u'title']}): "
1587             f"Percentage change calculated for mean values.\n"
1588             u"Stdev(Diff): "
1589             u"Standard deviation of percentage change calculated for mean "
1590             u"values.\n"
1591             u":END"
1592         )
1593     except (AttributeError, KeyError) as err:
1594         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1595         return
1596
1597     # Create a list of available SOAK test results:
1598     tbl_dict = dict()
1599     for job, builds in table[u"compare"][u"data"].items():
1600         for build in builds:
1601             for tst_name, tst_data in data[job][str(build)].items():
1602                 if tst_data[u"type"] == u"SOAK":
1603                     tst_name_mod = tst_name.replace(u"-soak", u"")
1604                     if tbl_dict.get(tst_name_mod, None) is None:
1605                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
1606                         nic = groups.group(0) if groups else u""
1607                         name = (
1608                             f"{nic}-"
1609                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1610                         )
1611                         tbl_dict[tst_name_mod] = {
1612                             u"name": name,
1613                             u"ref-data": list(),
1614                             u"cmp-data": list()
1615                         }
1616                     try:
1617                         tbl_dict[tst_name_mod][u"cmp-data"].append(
1618                             tst_data[u"throughput"][u"LOWER"])
1619                     except (KeyError, TypeError):
1620                         pass
1621     tests_lst = tbl_dict.keys()
1622
1623     # Add corresponding NDR test results:
1624     for job, builds in table[u"reference"][u"data"].items():
1625         for build in builds:
1626             for tst_name, tst_data in data[job][str(build)].items():
1627                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1628                     replace(u"-mrr", u"")
1629                 if tst_name_mod not in tests_lst:
1630                     continue
1631                 try:
1632                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1633                         continue
1634                     if table[u"include-tests"] == u"MRR":
1635                         result = (tst_data[u"result"][u"receive-rate"],
1636                                   tst_data[u"result"][u"receive-stdev"])
1637                     elif table[u"include-tests"] == u"PDR":
1638                         result = \
1639                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
1640                     elif table[u"include-tests"] == u"NDR":
1641                         result = \
1642                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
1643                     else:
1644                         result = None
1645                     if result is not None:
1646                         tbl_dict[tst_name_mod][u"ref-data"].append(
1647                             result)
1648                 except (KeyError, TypeError):
1649                     continue
1650
1651     tbl_lst = list()
1652     for tst_name in tbl_dict:
1653         item = [tbl_dict[tst_name][u"name"], ]
1654         data_r = tbl_dict[tst_name][u"ref-data"]
1655         if data_r:
1656             if table[u"include-tests"] == u"MRR":
1657                 data_r_mean = data_r[0][0]
1658                 data_r_stdev = data_r[0][1]
1659             else:
1660                 data_r_mean = mean(data_r)
1661                 data_r_stdev = stdev(data_r)
1662             item.append(round(data_r_mean / 1e6, 1))
1663             item.append(round(data_r_stdev / 1e6, 1))
1664         else:
1665             data_r_mean = None
1666             data_r_stdev = None
1667             item.extend([None, None])
1668         data_c = tbl_dict[tst_name][u"cmp-data"]
1669         if data_c:
1670             if table[u"include-tests"] == u"MRR":
1671                 data_c_mean = data_c[0][0]
1672                 data_c_stdev = data_c[0][1]
1673             else:
1674                 data_c_mean = mean(data_c)
1675                 data_c_stdev = stdev(data_c)
1676             item.append(round(data_c_mean / 1e6, 1))
1677             item.append(round(data_c_stdev / 1e6, 1))
1678         else:
1679             data_c_mean = None
1680             data_c_stdev = None
1681             item.extend([None, None])
1682         if data_r_mean is not None and data_c_mean is not None:
1683             delta, d_stdev = relative_change_stdev(
1684                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1685             try:
1686                 item.append(round(delta))
1687             except ValueError:
1688                 item.append(delta)
1689             try:
1690                 item.append(round(d_stdev))
1691             except ValueError:
1692                 item.append(d_stdev)
1693             tbl_lst.append(item)
1694
1695     # Sort the table according to the relative change
1696     tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)
1697
1698     # Generate csv tables:
1699     csv_file = f"{table[u'output-file']}.csv"
1700     with open(csv_file, u"wt") as file_handler:
1701         file_handler.write(header_str)
1702         for test in tbl_lst:
1703             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1704
1705     convert_csv_to_pretty_txt(
1706         csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
1707     )
1708     with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1709         txt_file.write(legend)
1710
1711     # Generate html table:
1712     _tpc_generate_html_table(
1713         header,
1714         tbl_lst,
1715         table[u'output-file'],
1716         legend=legend
1717     )
1718
1719
1720 def table_perf_trending_dash(table, input_data):
1721     """Generate the table(s) with algorithm:
1722     table_perf_trending_dash
1723     specified in the specification file.
1724
1725     :param table: Table to generate.
1726     :param input_data: Data to process.
1727     :type table: pandas.Series
1728     :type input_data: InputData
1729     """
1730
1731     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1732
1733     # Transform the data
1734     logging.info(
1735         f"    Creating the data set for the {table.get(u'type', u'')} "
1736         f"{table.get(u'title', u'')}."
1737     )
1738     data = input_data.filter_data(table, continue_on_error=True)
1739
1740     # Prepare the header of the tables
1741     header = [
1742         u"Test Case",
1743         u"Trend [Mpps]",
1744         u"Short-Term Change [%]",
1745         u"Long-Term Change [%]",
1746         u"Regressions [#]",
1747         u"Progressions [#]"
1748     ]
1749     header_str = u",".join(header) + u"\n"
1750
1751     # Prepare data to the table:
1752     tbl_dict = dict()
1753     for job, builds in table[u"data"].items():
1754         for build in builds:
1755             for tst_name, tst_data in data[job][str(build)].items():
1756                 if tst_name.lower() in table.get(u"ignore-list", list()):
1757                     continue
1758                 if tbl_dict.get(tst_name, None) is None:
1759                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1760                     if not groups:
1761                         continue
1762                     nic = groups.group(0)
1763                     tbl_dict[tst_name] = {
1764                         u"name": f"{nic}-{tst_data[u'name']}",
1765                         u"data": OrderedDict()
1766                     }
1767                 try:
1768                     tbl_dict[tst_name][u"data"][str(build)] = \
1769                         tst_data[u"result"][u"receive-rate"]
1770                 except (TypeError, KeyError):
1771                     pass  # No data in output.xml for this test
1772
1773     tbl_lst = list()
1774     for tst_name in tbl_dict:
1775         data_t = tbl_dict[tst_name][u"data"]
1776         if len(data_t) < 2:
1777             continue
1778
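         # classify_anomalies() returns a per-build classification (counted
         # below for u"regression" and u"progression") together with the
         # corresponding trend averages.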
1779         classification_lst, avgs = classify_anomalies(data_t)
1780
1781         win_size = min(len(data_t), table[u"window"])
1782         long_win_size = min(len(data_t), table[u"long-trend-window"])
1783
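         # Short-term change compares the last trend average with the average
         # one short window ago; long-term change compares it with the maximum
         # average inside the long window preceding the short window.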
1784         try:
1785             max_long_avg = max(
1786                 [x for x in avgs[-long_win_size:-win_size]
1787                  if not isnan(x)])
1788         except ValueError:
1789             max_long_avg = nan
1790         last_avg = avgs[-1]
1791         avg_week_ago = avgs[max(-win_size, -len(avgs))]
1792
1793         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1794             rel_change_last = nan
1795         else:
1796             rel_change_last = round(
1797                 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
1798
1799         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1800             rel_change_long = nan
1801         else:
1802             rel_change_long = round(
1803                 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
1804
1805         if classification_lst:
1806             if isnan(rel_change_last) and isnan(rel_change_long):
1807                 continue
1808             if isnan(last_avg) or isnan(rel_change_last) or \
1809                     isnan(rel_change_long):
1810                 continue
1811             tbl_lst.append(
1812                 [tbl_dict[tst_name][u"name"],
1813                  round(last_avg / 1e6, 2),
1814                  rel_change_last,
1815                  rel_change_long,
1816                  classification_lst[-win_size:].count(u"regression"),
1817                  classification_lst[-win_size:].count(u"progression")])
1818
1819     tbl_lst.sort(key=lambda rel: rel[0])
1820
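     # Order the dashboard: most regressions first, then most progressions,
     # then the lowest (most negative) short-term change within each group.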
1821     tbl_sorted = list()
1822     for nrr in range(table[u"window"], -1, -1):
1823         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1824         for nrp in range(table[u"window"], -1, -1):
1825             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1826             tbl_out.sort(key=lambda rel: rel[2])
1827             tbl_sorted.extend(tbl_out)
1828
1829     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1830
1831     logging.info(f"    Writing file: {file_name}")
1832     with open(file_name, u"wt") as file_handler:
1833         file_handler.write(header_str)
1834         for test in tbl_sorted:
1835             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1836
1837     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1838     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1839
1840
1841 def _generate_url(testbed, test_name):
1842     """Generate URL to a trending plot from the name of the test case.
1843
1844     :param testbed: The testbed used for testing.
1845     :param test_name: The name of the test case.
1846     :type testbed: str
1847     :type test_name: str
1848     :returns: The URL to the plot with the trending data for the given test
1849         case.
1850     :rtype: str
1851     """
1852
1853     if u"x520" in test_name:
1854         nic = u"x520"
1855     elif u"x710" in test_name:
1856         nic = u"x710"
1857     elif u"xl710" in test_name:
1858         nic = u"xl710"
1859     elif u"xxv710" in test_name:
1860         nic = u"xxv710"
1861     elif u"vic1227" in test_name:
1862         nic = u"vic1227"
1863     elif u"vic1385" in test_name:
1864         nic = u"vic1385"
1865     elif u"x553" in test_name:
1866         nic = u"x553"
1867     else:
1868         nic = u""
1869
1870     if u"64b" in test_name:
1871         frame_size = u"64b"
1872     elif u"78b" in test_name:
1873         frame_size = u"78b"
1874     elif u"imix" in test_name:
1875         frame_size = u"imix"
1876     elif u"9000b" in test_name:
1877         frame_size = u"9000b"
1878     elif u"1518b" in test_name:
1879         frame_size = u"1518b"
1880     elif u"114b" in test_name:
1881         frame_size = u"114b"
1882     else:
1883         frame_size = u""
1884
1885     if u"1t1c" in test_name or \
1886         (u"-1c-" in test_name and
1887          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1888         cores = u"1t1c"
1889     elif u"2t2c" in test_name or \
1890          (u"-2c-" in test_name and
1891           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1892         cores = u"2t2c"
1893     elif u"4t4c" in test_name or \
1894          (u"-4c-" in test_name and
1895           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1896         cores = u"4t4c"
1897     elif u"2t1c" in test_name or \
1898          (u"-1c-" in test_name and
1899           testbed in (u"2n-skx", u"3n-skx")):
1900         cores = u"2t1c"
1901     elif u"4t2c" in test_name:
1902         cores = u"4t2c"
1903     elif u"8t4c" in test_name:
1904         cores = u"8t4c"
1905     else:
1906         cores = u""
1907
1908     if u"testpmd" in test_name:
1909         driver = u"testpmd"
1910     elif u"l3fwd" in test_name:
1911         driver = u"l3fwd"
1912     elif u"avf" in test_name:
1913         driver = u"avf"
1914     elif u"dnv" in testbed or u"tsh" in testbed:
1915         driver = u"ixgbe"
1916     else:
1917         driver = u"dpdk"
1918
1919     if u"acl" in test_name or \
1920             u"macip" in test_name or \
1921             u"nat" in test_name or \
1922             u"policer" in test_name or \
1923             u"cop" in test_name:
1924         bsf = u"features"
1925     elif u"scale" in test_name:
1926         bsf = u"scale"
1927     elif u"base" in test_name:
1928         bsf = u"base"
1929     else:
1930         bsf = u"base"
1931
1932     if u"114b" in test_name and u"vhost" in test_name:
1933         domain = u"vts"
1934     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1935         domain = u"dpdk"
1936     elif u"memif" in test_name:
1937         domain = u"container_memif"
1938     elif u"srv6" in test_name:
1939         domain = u"srv6"
1940     elif u"vhost" in test_name:
1941         domain = u"vhost"
1942         if u"vppl2xc" in test_name:
1943             driver += u"-vpp"
1944         else:
1945             driver += u"-testpmd"
1946         if u"lbvpplacp" in test_name:
1947             bsf += u"-link-bonding"
1948     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1949         domain = u"nf_service_density_vnfc"
1950     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1951         domain = u"nf_service_density_cnfc"
1952     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1953         domain = u"nf_service_density_cnfp"
1954     elif u"ipsec" in test_name:
1955         domain = u"ipsec"
1956         if u"sw" in test_name:
1957             bsf += u"-sw"
1958         elif u"hw" in test_name:
1959             bsf += u"-hw"
1960     elif u"ethip4vxlan" in test_name:
1961         domain = u"ip4_tunnels"
1962     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1963         domain = u"ip4"
1964     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1965         domain = u"ip6"
1966     elif u"l2xcbase" in test_name or \
1967             u"l2xcscale" in test_name or \
1968             u"l2bdbasemaclrn" in test_name or \
1969             u"l2bdscale" in test_name or \
1970             u"l2patch" in test_name:
1971         domain = u"l2"
1972     else:
1973         domain = u""
1974
1975     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1976     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1977
1978     return file_name + anchor_name
1979
1980
1981 def table_perf_trending_dash_html(table, input_data):
1982     """Generate the table(s) with algorithm:
1983     table_perf_trending_dash_html specified in the specification
1984     file.
1985
1986     :param table: Table to generate.
1987     :param input_data: Data to process.
1988     :type table: dict
1989     :type input_data: InputData
1990     """
1991
1992     _ = input_data
1993
1994     if not table.get(u"testbed", None):
1995         logging.error(
1996             f"The testbed is not defined for the table "
1997             f"{table.get(u'title', u'')}."
1998         )
1999         return
2000
2001     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2002
2003     try:
2004         with open(table[u"input-file"], u'rt') as csv_file:
2005             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2006     except KeyError:
2007         logging.warning(u"The input file is not defined.")
2008         return
2009     except csv.Error as err:
2010         logging.warning(
2011             f"Not possible to process the file {table[u'input-file']}.\n"
2012             f"{repr(err)}"
2013         )
2014         return
2015
2016     # Table:
2017     dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2018
2019     # Table header:
2020     trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2021     for idx, item in enumerate(csv_lst[0]):
2022         alignment = u"left" if idx == 0 else u"center"
2023         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
2024         thead.text = item
2025
2026     # Rows:
2027     colors = {
2028         u"regression": (
2029             u"#ffcccc",
2030             u"#ff9999"
2031         ),
2032         u"progression": (
2033             u"#c6ecc6",
2034             u"#9fdf9f"
2035         ),
2036         u"normal": (
2037             u"#e9f1fb",
2038             u"#d4e4f7"
2039         )
2040     }
2041     for r_idx, row in enumerate(csv_lst[1:]):
2042         if int(row[4]):
2043             color = u"regression"
2044         elif int(row[5]):
2045             color = u"progression"
2046         else:
2047             color = u"normal"
2048         trow = ET.SubElement(
2049             dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
2050         )
2051
2052         # Columns:
2053         for c_idx, item in enumerate(row):
2054             tdata = ET.SubElement(
2055                 trow,
2056                 u"td",
2057                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
2058             )
2059             # Name:
2060             if c_idx == 0:
2061                 ref = ET.SubElement(
2062                     tdata,
2063                     u"a",
2064                     attrib=dict(
2065                         href=f"../trending/"
2066                              f"{_generate_url(table.get(u'testbed', ''), item)}"
2067                     )
2068                 )
2069                 ref.text = item
2070             else:
2071                 tdata.text = item
2072     try:
2073         with open(table[u"output-file"], u'w') as html_file:
2074             logging.info(f"    Writing file: {table[u'output-file']}")
2075             html_file.write(u".. raw:: html\n\n\t")
2076             html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
2077             html_file.write(u"\n\t<p><br><br></p>\n")
2078     except KeyError:
2079         logging.warning(u"The output file is not defined.")
2080         return
2081
2082
2083 def table_last_failed_tests(table, input_data):
2084     """Generate the table(s) with algorithm: table_last_failed_tests
2085     specified in the specification file.
2086
2087     :param table: Table to generate.
2088     :param input_data: Data to process.
2089     :type table: pandas.Series
2090     :type input_data: InputData
2091     """
2092
2093     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2094
2095     # Transform the data
2096     logging.info(
2097         f"    Creating the data set for the {table.get(u'type', u'')} "
2098         f"{table.get(u'title', u'')}."
2099     )
2100
2101     data = input_data.filter_data(table, continue_on_error=True)
2102
2103     if data is None or data.empty:
2104         logging.warning(
2105             f"    No data for the {table.get(u'type', u'')} "
2106             f"{table.get(u'title', u'')}."
2107         )
2108         return
2109
2110     tbl_list = list()
2111     for job, builds in table[u"data"].items():
2112         for build in builds:
2113             build = str(build)
2114             try:
2115                 version = input_data.metadata(job, build).get(u"version", u"")
2116             except KeyError:
2117                 logging.error(f"Data for {job}: {build} is not present.")
2118                 return
2119             tbl_list.append(build)
2120             tbl_list.append(version)
2121             failed_tests = list()
2122             passed = 0
2123             failed = 0
2124             for tst_data in data[job][build].values:
2125                 if tst_data[u"status"] != u"FAIL":
2126                     passed += 1
2127                     continue
2128                 failed += 1
2129                 groups = re.search(REGEX_NIC, tst_data[u"parent"])
2130                 if not groups:
2131                     continue
2132                 nic = groups.group(0)
2133                 failed_tests.append(f"{nic}-{tst_data[u'name']}")
2134             tbl_list.append(str(passed))
2135             tbl_list.append(str(failed))
2136             tbl_list.extend(failed_tests)
2137
2138     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2139     logging.info(f"    Writing file: {file_name}")
2140     with open(file_name, u"wt") as file_handler:
2141         for test in tbl_list:
2142             file_handler.write(test + u'\n')
2143
2144
2145 def table_failed_tests(table, input_data):
2146     """Generate the table(s) with algorithm: table_failed_tests
2147     specified in the specification file.
2148
2149     :param table: Table to generate.
2150     :param input_data: Data to process.
2151     :type table: pandas.Series
2152     :type input_data: InputData
2153     """
2154
2155     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2156
2157     # Transform the data
2158     logging.info(
2159         f"    Creating the data set for the {table.get(u'type', u'')} "
2160         f"{table.get(u'title', u'')}."
2161     )
2162     data = input_data.filter_data(table, continue_on_error=True)
2163
2164     # Prepare the header of the tables
2165     header = [
2166         u"Test Case",
2167         u"Failures [#]",
2168         u"Last Failure [Time]",
2169         u"Last Failure [VPP-Build-Id]",
2170         u"Last Failure [CSIT-Job-Build-Id]"
2171     ]
2172
2173     # Generate the data for the table according to the model in the table
2174     # specification
2175
2176     now = dt.utcnow()
2177     timeperiod = timedelta(int(table.get(u"window", 7)))
2178
2179     tbl_dict = dict()
2180     for job, builds in table[u"data"].items():
2181         for build in builds:
2182             build = str(build)
2183             for tst_name, tst_data in data[job][build].items():
2184                 if tst_name.lower() in table.get(u"ignore-list", list()):
2185                     continue
2186                 if tbl_dict.get(tst_name, None) is None:
2187                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
2188                     if not groups:
2189                         continue
2190                     nic = groups.group(0)
2191                     tbl_dict[tst_name] = {
2192                         u"name": f"{nic}-{tst_data[u'name']}",
2193                         u"data": OrderedDict()
2194                     }
2195                 try:
2196                     generated = input_data.metadata(job, build).\
2197                         get(u"generated", u"")
2198                     if not generated:
2199                         continue
2200                     then = dt.strptime(generated, u"%Y%m%d %H:%M")
2201                     if (now - then) <= timeperiod:
2202                         tbl_dict[tst_name][u"data"][build] = (
2203                             tst_data[u"status"],
2204                             generated,
2205                             input_data.metadata(job, build).get(u"version",
2206                                                                 u""),
2207                             build
2208                         )
2209                 except (TypeError, KeyError) as err:
2210                     logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
2211
2212     max_fails = 0
2213     tbl_lst = list()
2214     for tst_data in tbl_dict.values():
2215         fails_nr = 0
2216         fails_last_date = u""
2217         fails_last_vpp = u""
2218         fails_last_csit = u""
2219         for val in tst_data[u"data"].values():
2220             if val[0] == u"FAIL":
2221                 fails_nr += 1
2222                 fails_last_date = val[1]
2223                 fails_last_vpp = val[2]
2224                 fails_last_csit = val[3]
2225         if fails_nr:
2226             max_fails = fails_nr if fails_nr > max_fails else max_fails
2227             tbl_lst.append(
2228                 [
2229                     tst_data[u"name"],
2230                     fails_nr,
2231                     fails_last_date,
2232                     fails_last_vpp,
2233                     f"mrr-daily-build-{fails_last_csit}"
2234                 ]
2235             )
2236
2237     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
2238     tbl_sorted = list()
2239     for nrf in range(max_fails, -1, -1):
2240         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
2241         tbl_sorted.extend(tbl_fails)
2242
2243     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2244     logging.info(f"    Writing file: {file_name}")
2245     with open(file_name, u"wt") as file_handler:
2246         file_handler.write(u",".join(header) + u"\n")
2247         for test in tbl_sorted:
2248             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
2249
2250     logging.info(f"    Writing file: {table[u'output-file']}.txt")
2251     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
2252
2253
2254 def table_failed_tests_html(table, input_data):
2255     """Generate the table(s) with algorithm: table_failed_tests_html
2256     specified in the specification file.
2257
2258     :param table: Table to generate.
2259     :param input_data: Data to process.
2260     :type table: pandas.Series
2261     :type input_data: InputData
2262     """
2263
2264     _ = input_data
2265
2266     if not table.get(u"testbed", None):
2267         logging.error(
2268             f"The testbed is not defined for the table "
2269             f"{table.get(u'title', u'')}."
2270         )
2271         return
2272
2273     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2274
2275     try:
2276         with open(table[u"input-file"], u'rt') as csv_file:
2277             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2278     except KeyError:
2279         logging.warning(u"The input file is not defined.")
2280         return
2281     except csv.Error as err:
2282         logging.warning(
2283             f"Not possible to process the file {table[u'input-file']}.\n"
2284             f"{repr(err)}"
2285         )
2286         return
2287
2288     # Table:
2289     failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2290
2291     # Table header:
2292     trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2293     for idx, item in enumerate(csv_lst[0]):
2294         alignment = u"left" if idx == 0 else u"center"
2295         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
2296         thead.text = item
2297
2298     # Rows:
2299     colors = (u"#e9f1fb", u"#d4e4f7")
2300     for r_idx, row in enumerate(csv_lst[1:]):
2301         background = colors[r_idx % 2]
2302         trow = ET.SubElement(
2303             failed_tests, u"tr", attrib=dict(bgcolor=background)
2304         )
2305
2306         # Columns:
2307         for c_idx, item in enumerate(row):
2308             tdata = ET.SubElement(
2309                 trow,
2310                 u"td",
2311                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
2312             )
2313             # Name:
2314             if c_idx == 0:
2315                 ref = ET.SubElement(
2316                     tdata,
2317                     u"a",
2318                     attrib=dict(
2319                         href=f"../trending/"
2320                              f"{_generate_url(table.get(u'testbed', ''), item)}"
2321                     )
2322                 )
2323                 ref.text = item
2324             else:
2325                 tdata.text = item
2326     try:
2327         with open(table[u"output-file"], u'w') as html_file:
2328             logging.info(f"    Writing file: {table[u'output-file']}")
2329             html_file.write(u".. raw:: html\n\n\t")
2330             html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2331             html_file.write(u"\n\t<p><br><br></p>\n")
2332     except KeyError:
2333         logging.warning(u"The output file is not defined.")
2334         return
2335
2336
2337 def table_comparison(table, input_data):
2338     """Generate the table(s) with algorithm: table_comparison
2339     specified in the specification file.
2340
2341     :param table: Table to generate.
2342     :param input_data: Data to process.
2343     :type table: pandas.Series
2344     :type input_data: InputData
2345     """
2346     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2347
2348     # Transform the data
2349     logging.info(
2350         f"    Creating the data set for the {table.get(u'type', u'')} "
2351         f"{table.get(u'title', u'')}."
2352     )
2353
2354     columns = table.get(u"columns", None)
2355     if not columns:
2356         logging.error(
2357             f"No columns specified for {table.get(u'title', u'')}. Skipping."
2358         )
2359         return
2360
2361     cols = list()
2362     for idx, col in enumerate(columns):
2363         if col.get(u"data", None) is None:
2364             logging.warning(f"No data for column {col.get(u'title', u'')}")
2365             continue
2366         data = input_data.filter_data(
2367             table,
2368             params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2369             data=col[u"data"],
2370             continue_on_error=True
2371         )
2372         col_data = {
2373             u"title": col.get(u"title", f"Column{idx}"),
2374             u"data": dict()
2375         }
2376         for builds in data.values:
2377             for build in builds:
2378                 for tst_name, tst_data in build.items():
2379                     tst_name_mod = \
2380                         _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2381                     if col_data[u"data"].get(tst_name_mod, None) is None:
2382                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
2383                         if u"across testbeds" in table[u"title"].lower() or \
2384                                 u"across topologies" in table[u"title"].lower():
2385                             name = _tpc_modify_displayed_test_name(name)
2386                         col_data[u"data"][tst_name_mod] = {
2387                             u"name": name,
2388                             u"replace": True,
2389                             u"data": list(),
2390                             u"mean": None,
2391                             u"stdev": None
2392                         }
2393                     _tpc_insert_data(
2394                         target=col_data[u"data"][tst_name_mod][u"data"],
2395                         src=tst_data,
2396                         include_tests=table[u"include-tests"]
2397                     )
2398
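         # Optional data replacement: when a u"data-replacement" source is
         # given, the samples collected above are discarded for every test
         # present in the replacement data and the replacement samples are
         # used instead.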
2399         replacement = col.get(u"data-replacement", None)
2400         if replacement:
2401             rpl_data = input_data.filter_data(
2402                 table,
2403                 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2404                 data=replacement,
2405                 continue_on_error=True
2406             )
2407             for builds in rpl_data.values:
2408                 for build in builds:
2409                     for tst_name, tst_data in build.items():
2410                         tst_name_mod = \
2411                             _tpc_modify_test_name(tst_name).\
2412                             replace(u"2n1l-", u"")
2413                         if col_data[u"data"].get(tst_name_mod, None) is None:
2414                             name = tst_data[u'name'].rsplit(u'-', 1)[0]
2415                             if u"across testbeds" in table[u"title"].lower() \
2416                                     or u"across topologies" in \
2417                                     table[u"title"].lower():
2418                                 name = _tpc_modify_displayed_test_name(name)
2419                             col_data[u"data"][tst_name_mod] = {
2420                                 u"name": name,
2421                                 u"replace": False,
2422                                 u"data": list(),
2423                                 u"mean": None,
2424                                 u"stdev": None
2425                             }
2426                         if col_data[u"data"][tst_name_mod][u"replace"]:
2427                             col_data[u"data"][tst_name_mod][u"replace"] = False
2428                             col_data[u"data"][tst_name_mod][u"data"] = list()
2429                         _tpc_insert_data(
2430                             target=col_data[u"data"][tst_name_mod][u"data"],
2431                             src=tst_data,
2432                             include_tests=table[u"include-tests"]
2433                         )
2434
2435         if table[u"include-tests"] in (u"NDR", u"PDR"):
2436             for tst_name, tst_data in col_data[u"data"].items():
2437                 if tst_data[u"data"]:
2438                     tst_data[u"mean"] = mean(tst_data[u"data"])
2439                     tst_data[u"stdev"] = stdev(tst_data[u"data"])
2440         elif table[u"include-tests"] in (u"MRR", ):
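             # MRR samples are assumed to be (receive-rate, receive-stdev)
             # tuples, as produced elsewhere in this module, so the first
             # sample already provides both statistics.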
2441             for tst_name, tst_data in col_data[u"data"].items():
2442                 if tst_data[u"data"]:
2443                     tst_data[u"mean"] = tst_data[u"data"][0]
2444                     tst_data[u"stdev"] = tst_data[u"data"][0]
2445
2446         cols.append(col_data)
2447
2448     tbl_dict = dict()
2449     for col in cols:
2450         for tst_name, tst_data in col[u"data"].items():
2451             if tbl_dict.get(tst_name, None) is None:
2452                 tbl_dict[tst_name] = {
2453                     "name": tst_data[u"name"]
2454                 }
2455             tbl_dict[tst_name][col[u"title"]] = {
2456                 u"mean": tst_data[u"mean"],
2457                 u"stdev": tst_data[u"stdev"]
2458             }
2459
2460     tbl_lst = list()
2461     for tst_data in tbl_dict.values():
2462         row = [tst_data[u"name"], ]
2463         for col in cols:
2464             row.append(tst_data.get(col[u"title"], None))
2465         tbl_lst.append(row)
2466
2467     comparisons = table.get(u"comparisons", None)
2468     if comparisons and isinstance(comparisons, list):
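         # u"reference" and u"compare" are 1-based indices into the configured
         # columns; invalid entries are removed while iterating over a
         # reversed copy so that pop(idx) stays safe.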
2469         for idx, comp in reversed(list(enumerate(comparisons))):
2470             try:
2471                 col_ref = int(comp[u"reference"])
2472                 col_cmp = int(comp[u"compare"])
2473             except KeyError:
2474                 logging.warning(u"Comparison: No references defined! Skipping.")
2475                 comparisons.pop(idx)
2476                 continue
2477             if not (0 < col_ref <= len(cols) and
2478                     0 < col_cmp <= len(cols)) or \
2479                     col_ref == col_cmp:
2480                 logging.warning(f"Wrong values of reference={col_ref} "
2481                                 f"and/or compare={col_cmp}. Skipping.")
2482                 comparisons.pop(idx)
2483                 continue
2484
2485     tbl_cmp_lst = list()
2486     if comparisons:
2487         for row in tbl_lst:
2488             new_row = deepcopy(row)
2489             add_to_tbl = False
2490             for comp in comparisons:
2491                 ref_itm = row[int(comp[u"reference"])]
2492                 if ref_itm is None and \
2493                         comp.get(u"reference-alt", None) is not None:
2494                     ref_itm = row[int(comp[u"reference-alt"])]
2495                 cmp_itm = row[int(comp[u"compare"])]
2496                 if ref_itm is not None and cmp_itm is not None and \
2497                         ref_itm[u"mean"] is not None and \
2498                         cmp_itm[u"mean"] is not None and \
2499                         ref_itm[u"stdev"] is not None and \
2500                         cmp_itm[u"stdev"] is not None:
2501                     delta, d_stdev = relative_change_stdev(
2502                         ref_itm[u"mean"], cmp_itm[u"mean"],
2503                         ref_itm[u"stdev"], cmp_itm[u"stdev"]
2504                     )
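                         # The diff is scaled by 1e6 so that the shared
                         # formatting below, which divides every value by 1e6
                         # (Mpps), prints the percentage unchanged.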
2505                     new_row.append(
2506                         {
2507                             u"mean": delta * 1e6,
2508                             u"stdev": d_stdev * 1e6
2509                         }
2510                     )
2511                     add_to_tbl = True
2512                 else:
2513                     new_row.append(None)
2514             if add_to_tbl:
2515                 tbl_cmp_lst.append(new_row)
2516
2517     tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
2518     tbl_cmp_lst.sort(
             key=lambda rel: rel[-1][u'mean'] if rel[-1] else 0.0, reverse=True
         )
2519
2520     rcas = list()
2521     rca_in = table.get(u"rca", None)
2522     if rca_in and isinstance(rca_in, list):
2523         for idx, itm in enumerate(rca_in):
2524             try:
2525                 with open(itm.get(u"data", u""), u"r") as rca_file:
2526                     rcas.append(
2527                         {
2528                             u"title": itm.get(u"title", f"RCA{idx}"),
2529                             u"data": load(rca_file, Loader=FullLoader)
2530                         }
2531                     )
2532             except (YAMLError, IOError) as err:
2533                 logging.warning(
2534                     f"The RCA file {itm.get(u'data', u'')} does not exist or "
2535                     f"it is corrupted!"
2536                 )
2537                 logging.debug(repr(err))
2538
2539     tbl_for_csv = list()
2540     for line in tbl_cmp_lst:
2541
2542         row = [line[0], ]
2543
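         # Prepend one column per RCA file; each insert() shifts the test name
         # one position to the right, so row[0 + idx] keeps addressing it.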
2544         for idx, rca in enumerate(rcas):
2545             rca_nr = rca[u"data"].get(row[0 + idx], u"-")
2546             row.insert(idx, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
2547
2548         for idx, itm in enumerate(line[1:]):
2549             if itm is None:
2550                 row.append(u"NT")
2551                 row.append(u"NT")
2552             else:
2553                 row.append(round(float(itm[u'mean']) / 1e6, 3))
2554                 row.append(round(float(itm[u'stdev']) / 1e6, 3))
2555         tbl_for_csv.append(row)
2556
2557     header_csv = [rca[u"title"] for rca in rcas]
2558     header_csv.append(u"Test Case")
2559     for col in cols:
2560         header_csv.append(f"Avg({col[u'title']})")
2561         header_csv.append(f"Stdev({col[u'title']})")
2562     for comp in comparisons:
2563         header_csv.append(
2564             f"Avg({cols[comp[u'reference'] - 1][u'title']},"
2565             f"{cols[comp[u'compare'] - 1][u'title']})"
2566         )
2567         header_csv.append(
2568             f"Stdev({cols[comp[u'reference'] - 1][u'title']},"
2569             f"{cols[comp[u'compare'] - 1][u'title']})"
2570         )
2571
2572     csv_file = f"{table[u'output-file']}-csv.csv"
2573     with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2574         file_handler.write(u";".join(header_csv) + u"\n")
2575         for test in tbl_for_csv:
2576             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2577
2578     tbl_final = list()
2579     for line in tbl_cmp_lst:
2580         row = [line[0], ]
2581         for idx, rca in enumerate(rcas):
2582             rca_nr = rca[u"data"].get(row[0 + idx], u"-")
2583             row.insert(idx, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
2584         for idx, itm in enumerate(line[1:]):
2585             if itm is None:
2586                 row.append(u"NT")
2587             else:
2588                 if idx < len(cols):
2589                     row.append(
2590                         f"{round(float(itm[u'mean']) / 1e6, 1)} "
2591                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
2592                         replace(u"nan", u"NaN")
2593                     )
2594                 else:
2595                     row.append(
2596                         f"{round(float(itm[u'mean']) / 1e6, 1):+} "
2597                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
2598                         replace(u"nan", u"NaN")
2599                     )
2600         tbl_final.append(row)
2601
2602     header = [rca[u"title"] for rca in rcas]
2603     header.append(u"Test Case")
2604     header.extend([col[u"title"] for col in cols])
2605     header.extend(
2606         [f"Diff({cols[comp[u'reference'] - 1][u'title']},"
2607          f"{cols[comp[u'compare'] - 1][u'title']})"
2608          for comp in comparisons]
2609     )
2610
2611     # Generate csv tables:
2612     csv_file = f"{table[u'output-file']}.csv"
2613     with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2614         file_handler.write(u";".join(header) + u"\n")
2615         for test in tbl_final:
2616             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2617
2618     # Generate txt table:
2619     txt_file_name = f"{table[u'output-file']}.txt"
2620     convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
2621
2622     # Generate rst table:
2623     file_name = table[u'output-file'].split(u"/")[-1]
2624     if u"vpp" in table[u'output-file']:
2625         path = u"_tmp/src/vpp_performance_tests/comparisons/"
2626     else:
2627         path = u"_tmp/src/dpdk_performance_tests/comparisons/"
2628     rst_file_name = f"{path}{file_name}-txt.rst"
2629     csv_file_name = f"{path}{file_name}.csv"
2630     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2631         file_handler.write(
2632             u",".join(
2633                 [f'"{itm}"' for itm in header]
2634             ) + u"\n"
2635         )
2636         for test in tbl_final:
2637             file_handler.write(
2638                 u",".join(
2639                     [f'"{itm}"' for itm in test]
2640                 ) + u"\n"
2641             )
2642
2643     convert_csv_to_pretty_txt(csv_file_name, rst_file_name, delimiter=u",")
2644
2645     legend = u"\nLegend:\n"
2646     for idx, rca in enumerate(rcas):
2647         try:
2648             desc = (
2649                 f"Diff({cols[comparisons[idx][u'reference'] - 1][u'title']},"
2650                 f"{cols[comparisons[idx][u'compare'] - 1][u'title']})\n"
2651             )
2652         except (KeyError, IndexError):
2653             desc = u"\n"
2654         legend += f"{rca[u'title']}: Root Cause Analysis for {desc}"
2655     legend += (
2656         u"First part of the result is a mean value [Mpps].\n"
2657         f"Second part of the result following '\u00B1' is a standard "
2658         u"deviation [Mpps].\n"
2659         u"First part of Diff is a relative change of mean values [%].\n"
2660         f"Second part of Diff following '\u00B1' is a standard deviation "
2661         u"of the Diff [percentual points].\n"
2662         u"NT: Not tested.\n"
2663     )
2664
2665     footnote = u""
2666     for rca in rcas:
2667         footnote += f"\n{rca[u'title']}:\n"
2668         footnote += rca[u"data"].get(u"footnote", u"")
2669
2670     with open(txt_file_name, u'a', encoding='utf-8') as txt_file:
2671         txt_file.write(legend)
2672         if footnote:
2673             txt_file.write(footnote)
2674         txt_file.write(u":END")
2675
2676     with open(rst_file_name, u'a', encoding='utf-8') as txt_file:
2677         txt_file.write(legend.replace(u"\n", u" |br| "))
2678         if footnote:
2679             txt_file.write(footnote.replace(u"\n", u" |br| "))
2680         txt_file.write(u":END")
2681
2682     # Generate html table:
2683     _tpc_generate_html_table(
2684         header,
2685         tbl_final,
2686         table[u'output-file'],
2687         legend=legend,
2688         footnote=footnote,
2689         sort_data=False
2690     )