PAL: Integrate new comp tables
[csit.git] resources/tools/presentation/generator_tables.py
1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
27
28 import plotly.graph_objects as go
29 import plotly.offline as ploff
30 import pandas as pd
31
32 from numpy import nan, isnan
33 from yaml import load, FullLoader, YAMLError
34
35 from pal_utils import mean, stdev, classify_anomalies, \
36     convert_csv_to_pretty_txt, relative_change_stdev
37
38
39 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
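# Illustrative note (not part of the original module): REGEX_NIC is used to
# strip the NIC part of a test name, e.g. it matches the substring
# u"10ge2p1x710" in the hypothetical name
# u"10ge2p1x710-ethip4-ip4base-2t1c-ndrpdr"; see
# _tpc_modify_test_name(..., ignore_nic=True) below.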
40
41
42 def generate_tables(spec, data):
43     """Generate all tables specified in the specification file.
44
45     :param spec: Specification read from the specification file.
46     :param data: Data to process.
47     :type spec: Specification
48     :type data: InputData
49     """
50
51     generator = {
52         u"table_merged_details": table_merged_details,
53         u"table_perf_comparison": table_perf_comparison,
54         u"table_perf_comparison_nic": table_perf_comparison_nic,
55         u"table_nics_comparison": table_nics_comparison,
56         u"table_soak_vs_ndr": table_soak_vs_ndr,
57         u"table_perf_trending_dash": table_perf_trending_dash,
58         u"table_perf_trending_dash_html": table_perf_trending_dash_html,
59         u"table_last_failed_tests": table_last_failed_tests,
60         u"table_failed_tests": table_failed_tests,
61         u"table_failed_tests_html": table_failed_tests_html,
62         u"table_oper_data_html": table_oper_data_html,
63         u"table_comparison": table_comparison
64     }
65
66     logging.info(u"Generating the tables ...")
67     for table in spec.tables:
68         try:
69             generator[table[u"algorithm"]](table, data)
70         except (KeyError, NameError) as err:
71             logging.error(
72                 f"The algorithm {table[u'algorithm']} is probably not "
73                 f"defined: {repr(err)}"
74             )
75     logging.info(u"Done.")
76
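# Usage sketch (illustrative only; the Specification and InputData objects are
# built elsewhere in the PAL pipeline):
#
#     spec = Specification(...)    # parsed specification file
#     data = InputData(spec)       # downloaded and parsed test results
#     generate_tables(spec, data)  # dispatches on table[u"algorithm"]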
77
78 def table_oper_data_html(table, input_data):
79     """Generate the table(s) with algorithm: table_oper_data_html
80     specified in the specification file.
81
82     :param table: Table to generate.
83     :param input_data: Data to process.
84     :type table: pandas.Series
85     :type input_data: InputData
86     """
87
88     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
89     # Transform the data
90     logging.info(
91         f"    Creating the data set for the {table.get(u'type', u'')} "
92         f"{table.get(u'title', u'')}."
93     )
94     data = input_data.filter_data(
95         table,
96         params=[u"name", u"parent", u"show-run", u"type"],
97         continue_on_error=True
98     )
99     if data.empty:
100         return
101     data = input_data.merge_data(data)
102
103     sort_tests = table.get(u"sort", None)
104     if sort_tests:
105         args = dict(
106             inplace=True,
107             ascending=(sort_tests == u"ascending")
108         )
109         data.sort_index(**args)
110
111     suites = input_data.filter_data(
112         table,
113         continue_on_error=True,
114         data_set=u"suites"
115     )
116     if suites.empty:
117         return
118     suites = input_data.merge_data(suites)
119
120     def _generate_html_table(tst_data):
121         """Generate an HTML table with operational data for the given test.
122
123         :param tst_data: Test data to be used to generate the table.
124         :type tst_data: pandas.Series
125         :returns: HTML table with operational data.
126         :rtype: str
127         """
128
129         colors = {
130             u"header": u"#7eade7",
131             u"empty": u"#ffffff",
132             u"body": (u"#e9f1fb", u"#d4e4f7")
133         }
134
135         tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
136
137         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
138         thead = ET.SubElement(
139             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
140         )
141         thead.text = tst_data[u"name"]
142
143         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
144         thead = ET.SubElement(
145             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
146         )
147         thead.text = u"\t"
148
149         if tst_data.get(u"show-run", u"No Data") == u"No Data":
150             trow = ET.SubElement(
151                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
152             )
153             tcol = ET.SubElement(
154                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
155             )
156             tcol.text = u"No Data"
157
158             trow = ET.SubElement(
159                 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
160             )
161             thead = ET.SubElement(
162                 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
163             )
164             font = ET.SubElement(
165                 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
166             )
167             font.text = u"."
168             return str(ET.tostring(tbl, encoding=u"unicode"))
169
170         tbl_hdr = (
171             u"Name",
172             u"Nr of Vectors",
173             u"Nr of Packets",
174             u"Suspends",
175             u"Cycles per Packet",
176             u"Average Vector Size"
177         )
178
179         for dut_data in tst_data[u"show-run"].values():
180             trow = ET.SubElement(
181                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
182             )
183             tcol = ET.SubElement(
184                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
185             )
186             if dut_data.get(u"threads", None) is None:
187                 tcol.text = u"No Data"
188                 continue
189
190             bold = ET.SubElement(tcol, u"b")
191             bold.text = (
192                 f"Host IP: {dut_data.get(u'host', '')}, "
193                 f"Socket: {dut_data.get(u'socket', '')}"
194             )
195             trow = ET.SubElement(
196                 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
197             )
198             thead = ET.SubElement(
199                 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
200             )
201             thead.text = u"\t"
202
203             for thread_nr, thread in dut_data[u"threads"].items():
204                 trow = ET.SubElement(
205                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
206                 )
207                 tcol = ET.SubElement(
208                     trow, u"td", attrib=dict(align=u"left", colspan=u"6")
209                 )
210                 bold = ET.SubElement(tcol, u"b")
211                 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
212                 trow = ET.SubElement(
213                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
214                 )
215                 for idx, col in enumerate(tbl_hdr):
216                     tcol = ET.SubElement(
217                         trow, u"td",
218                         attrib=dict(align=u"right" if idx else u"left")
219                     )
220                     font = ET.SubElement(
221                         tcol, u"font", attrib=dict(size=u"2")
222                     )
223                     bold = ET.SubElement(font, u"b")
224                     bold.text = col
225                 for row_nr, row in enumerate(thread):
226                     trow = ET.SubElement(
227                         tbl, u"tr",
228                         attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
229                     )
230                     for idx, col in enumerate(row):
231                         tcol = ET.SubElement(
232                             trow, u"td",
233                             attrib=dict(align=u"right" if idx else u"left")
234                         )
235                         font = ET.SubElement(
236                             tcol, u"font", attrib=dict(size=u"2")
237                         )
238                         if isinstance(col, float):
239                             font.text = f"{col:.2f}"
240                         else:
241                             font.text = str(col)
242                 trow = ET.SubElement(
243                     tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
244                 )
245                 thead = ET.SubElement(
246                     trow, u"th", attrib=dict(align=u"left", colspan=u"6")
247                 )
248                 thead.text = u"\t"
249
250         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
251         thead = ET.SubElement(
252             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
253         )
254         font = ET.SubElement(
255             thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
256         )
257         font.text = u"."
258
259         return str(ET.tostring(tbl, encoding=u"unicode"))
260
261     for suite in suites.values:
262         html_table = str()
263         for test_data in data.values:
264             if test_data[u"parent"] not in suite[u"name"]:
265                 continue
266             html_table += _generate_html_table(test_data)
267         if not html_table:
268             continue
269         try:
270             file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
271             with open(f"{file_name}", u'w') as html_file:
272                 logging.info(f"    Writing file: {file_name}")
273                 html_file.write(u".. raw:: html\n\n\t")
274                 html_file.write(html_table)
275                 html_file.write(u"\n\t<p><br><br></p>\n")
276         except KeyError:
277             logging.warning(u"The output file is not defined.")
278             return
279     logging.info(u"  Done.")
280
281
282 def table_merged_details(table, input_data):
283     """Generate the table(s) with algorithm: table_merged_details
284     specified in the specification file.
285
286     :param table: Table to generate.
287     :param input_data: Data to process.
288     :type table: pandas.Series
289     :type input_data: InputData
290     """
291
292     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
293
294     # Transform the data
295     logging.info(
296         f"    Creating the data set for the {table.get(u'type', u'')} "
297         f"{table.get(u'title', u'')}."
298     )
299     data = input_data.filter_data(table, continue_on_error=True)
300     data = input_data.merge_data(data)
301
302     sort_tests = table.get(u"sort", None)
303     if sort_tests:
304         args = dict(
305             inplace=True,
306             ascending=(sort_tests == u"ascending")
307         )
308         data.sort_index(**args)
309
310     suites = input_data.filter_data(
311         table, continue_on_error=True, data_set=u"suites")
312     suites = input_data.merge_data(suites)
313
314     # Prepare the header of the tables
315     header = list()
316     for column in table[u"columns"]:
317         header.append(
318             u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
319         )
320
321     for suite in suites.values:
322         # Generate data
323         suite_name = suite[u"name"]
324         table_lst = list()
325         for test in data.keys():
326             if data[test][u"parent"] not in suite_name:
327                 continue
328             row_lst = list()
329             for column in table[u"columns"]:
330                 try:
331                     col_data = str(data[test][column[
332                         u"data"].split(u" ")[1]]).replace(u'"', u'""')
333                     # Do not include tests with "Test Failed" in test message
334                     if u"Test Failed" in col_data:
335                         continue
336                     col_data = col_data.replace(
337                         u"No Data", u"Not Captured     "
338                     )
339                     if column[u"data"].split(u" ")[1] in (u"name", ):
340                         if len(col_data) > 30:
341                             col_data_lst = col_data.split(u"-")
342                             half = int(len(col_data_lst) / 2)
343                             col_data = f"{u'-'.join(col_data_lst[:half])}" \
344                                        f"- |br| " \
345                                        f"{u'-'.join(col_data_lst[half:])}"
346                         col_data = f" |prein| {col_data} |preout| "
347                     elif column[u"data"].split(u" ")[1] in (u"msg", ):
348                         # Temporary solution: remove NDR results from message:
349                         if bool(table.get(u'remove-ndr', False)):
350                             try:
351                                 col_data = col_data.split(u" |br| ", 1)[1]
352                             except IndexError:
353                                 pass
354                         col_data = f" |prein| {col_data} |preout| "
355                     elif column[u"data"].split(u" ")[1] in \
356                             (u"conf-history", u"show-run"):
357                         col_data = col_data.replace(u" |br| ", u"", 1)
358                         col_data = f" |prein| {col_data[:-5]} |preout| "
359                     row_lst.append(f'"{col_data}"')
360                 except KeyError:
361                     row_lst.append(u'"Not captured"')
362             if len(row_lst) == len(table[u"columns"]):
363                 table_lst.append(row_lst)
364
365         # Write the data to file
366         if table_lst:
367             separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
368             file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
369             logging.info(f"      Writing file: {file_name}")
370             with open(file_name, u"wt") as file_handler:
371                 file_handler.write(u",".join(header) + u"\n")
372                 for item in table_lst:
373                     file_handler.write(u",".join(item) + u"\n")
374
375     logging.info(u"  Done.")
376
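# Output note for table_merged_details (illustrative): one CSV is written per
# suite, named f"{table[u'output-file']}{separator}{suite_name}.csv", with
# double-quoted cells that may embed the |br|, |prein| and |preout| rST
# substitution markers expanded later in the documentation build.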
377
378 def _tpc_modify_test_name(test_name, ignore_nic=False):
379     """Modify a test name by replacing its parts.
380
381     :param test_name: Test name to be modified.
382     :param ignore_nic: If True, NIC is removed from TC name.
383     :type test_name: str
384     :type ignore_nic: bool
385     :returns: Modified test name.
386     :rtype: str
387     """
388     test_name_mod = test_name.\
389         replace(u"-ndrpdrdisc", u""). \
390         replace(u"-ndrpdr", u"").\
391         replace(u"-pdrdisc", u""). \
392         replace(u"-ndrdisc", u"").\
393         replace(u"-pdr", u""). \
394         replace(u"-ndr", u""). \
395         replace(u"1t1c", u"1c").\
396         replace(u"2t1c", u"1c"). \
397         replace(u"2t2c", u"2c").\
398         replace(u"4t2c", u"2c"). \
399         replace(u"4t4c", u"4c").\
400         replace(u"8t4c", u"4c")
401
402     if ignore_nic:
403         return re.sub(REGEX_NIC, u"", test_name_mod)
404     return test_name_mod
405
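# Example (doctest-style, hypothetical test name):
#
#     >>> _tpc_modify_test_name(u"10ge2p1x710-ethip4-ip4base-2t1c-ndrpdr")
#     '10ge2p1x710-ethip4-ip4base-1c'
#     >>> _tpc_modify_test_name(
#     ...     u"10ge2p1x710-ethip4-ip4base-2t1c-ndrpdr", ignore_nic=True)
#     '-ethip4-ip4base-1c'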
406
407 def _tpc_modify_displayed_test_name(test_name):
408     """Modify a test name which is displayed in a table by replacing its parts.
409
410     :param test_name: Test name to be modified.
411     :type test_name: str
412     :returns: Modified test name.
413     :rtype: str
414     """
415     return test_name.\
416         replace(u"1t1c", u"1c").\
417         replace(u"2t1c", u"1c"). \
418         replace(u"2t2c", u"2c").\
419         replace(u"4t2c", u"2c"). \
420         replace(u"4t4c", u"4c").\
421         replace(u"8t4c", u"4c")
422
423
424 def _tpc_insert_data(target, src, include_tests):
425     """Insert src data into the target structure.
426
427     :param target: Target structure where the data is placed.
428     :param src: Source data to be placed into the target structure.
429     :param include_tests: Which results will be included (MRR, NDR, PDR).
430     :type target: list
431     :type src: dict
432     :type include_tests: str
433     """
434     try:
435         if include_tests == u"MRR":
436             target.append(
437                 (
438                     src[u"result"][u"receive-rate"],
439                     src[u"result"][u"receive-stdev"]
440                 )
441             )
442         elif include_tests == u"PDR":
443             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
444         elif include_tests == u"NDR":
445             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
446     except (KeyError, TypeError):
447         pass
448
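# Data-shape note for _tpc_insert_data (illustrative): for
# include_tests == u"MRR" the appended item is a (receive-rate, receive-stdev)
# tuple, while for u"NDR" and u"PDR" it is a single throughput lower bound.
# The MRR branches further below rely on this difference when computing means
# and standard deviations.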
449
450 def _tpc_sort_table(table):
451     """Sort the table this way:
452
453     1. Rows with "New in CSIT-XXXX" are collected but currently excluded.
454     2. Put rows with "See footnote" first.
455     3. Sort the remaining rows by "Delta", descending.
456
457     :param table: Table to sort.
458     :type table: list
459     :returns: Sorted table.
460     :rtype: list
461     """
462
463     tbl_new = list()
464     tbl_see = list()
465     tbl_delta = list()
466     for item in table:
467         if isinstance(item[-1], str):
468             if u"New in CSIT" in item[-1]:
469                 tbl_new.append(item)
470             elif u"See footnote" in item[-1]:
471                 tbl_see.append(item)
472         else:
473             tbl_delta.append(item)
474
475     # Sort the tables:
476     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
477     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
478     tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
479     tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
480     tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
481
482     # Put the tables together:
483     table = list()
484     # We do not want "New in CSIT":
485     # table.extend(tbl_new)
486     table.extend(tbl_see)
487     table.extend(tbl_delta)
488
489     return table
490
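# Example for _tpc_sort_table (hypothetical rows roughly following the
# ref-avg, ref-stdev, cmp-avg, cmp-stdev, delta, stdev(diff) layout built
# below):
#
#     >>> _tpc_sort_table([
#     ...     [u"tst-a", 2.0, 0.1, 3.0, 0.1, 50.0, 4.0],
#     ...     [u"tst-b", 2.0, 0.1, 1.0, 0.1, -50.0, 4.0],
#     ...     [u"tst-c", 2.0, 0.1, 2.1, 0.1, u"See footnote [1]"],
#     ... ])
#
# returns the u"See footnote" row first, then tst-a (delta 50.0), then tst-b
# (delta -50.0); u"New in CSIT" rows would be dropped.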
491
492 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
493                              footnote=u"", sort_data=True):
494     """Generate an HTML table from the input data, with a simple sorting option.
495
496     :param header: Table header.
497     :param data: Input data to be included in the table. It is a list of lists.
498         Inner lists are rows in the table. All inner lists must be of the same
499         length. The length of these lists must be the same as the length of the
500         header.
501     :param out_file_name: The name (relative or full path) where the
502         generated html table is written.
503     :param legend: The legend to display below the table.
504     :param footnote: The footnote to display below the table (and legend).
505     :param sort_data: If True, sorting of the data is enabled.
506     :type header: list
507     :type data: list of lists
508     :type out_file_name: str
509     :type legend: str
510     :type footnote: str
511     :type sort_data: bool
512     """
513
514     try:
515         idx = header.index(u"Test Case")
516     except ValueError:
517         idx = 0
518     params = {
519         u"align-hdr": (
520             [u"left", u"center"],
521             [u"left", u"left", u"center"],
522             [u"left", u"left", u"left", u"center"]
523         ),
524         u"align-itm": (
525             [u"left", u"right"],
526             [u"left", u"left", u"right"],
527             [u"left", u"left", u"left", u"right"]
528         ),
529         u"width": ([28, 9], [4, 24, 10], [4, 4, 32, 10])
530     }
531
532     df_data = pd.DataFrame(data, columns=header)
533
534     if sort_data:
535         df_sorted = [df_data.sort_values(
536             by=[key, header[idx]], ascending=[True, True]
537             if key != header[idx] else [False, True]) for key in header]
538         df_sorted_rev = [df_data.sort_values(
539             by=[key, header[idx]], ascending=[False, True]
540             if key != header[idx] else [True, True]) for key in header]
541         df_sorted.extend(df_sorted_rev)
542     else:
543         df_sorted = df_data
544
545     fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
546                    for idx in range(len(df_data))]]
547     table_header = dict(
548         values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
549         fill_color=u"#7eade7",
550         align=params[u"align-hdr"][idx]
551     )
552
553     fig = go.Figure()
554
555     if sort_data:
556         for table in df_sorted:
557             columns = [table.get(col) for col in header]
558             fig.add_trace(
559                 go.Table(
560                     columnwidth=params[u"width"][idx],
561                     header=table_header,
562                     cells=dict(
563                         values=columns,
564                         fill_color=fill_color,
565                         align=params[u"align-itm"][idx]
566                     )
567                 )
568             )
569
570         buttons = list()
571         menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
572         menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
573         menu_items.extend(menu_items_rev)
574         for idx, hdr in enumerate(menu_items):
575             visible = [False, ] * len(menu_items)
576             visible[idx] = True
577             buttons.append(
578                 dict(
579                     label=hdr.replace(u" [Mpps]", u""),
580                     method=u"update",
581                     args=[{u"visible": visible}],
582                 )
583             )
584
585         fig.update_layout(
586             updatemenus=[
587                 go.layout.Updatemenu(
588                     type=u"dropdown",
589                     direction=u"down",
590                     x=0.0,
591                     xanchor=u"left",
592                     y=1.045,
593                     yanchor=u"top",
594                     active=len(menu_items) - 1,
595                     buttons=list(buttons)
596                 )
597             ],
598         )
599     else:
600         fig.add_trace(
601             go.Table(
602                 columnwidth=params[u"width"][idx],
603                 header=table_header,
604                 cells=dict(
605                     values=[df_sorted.get(col) for col in header],
606                     fill_color=fill_color,
607                     align=params[u"align-itm"][idx]
608                 )
609             )
610         )
611
612     ploff.plot(
613         fig,
614         show_link=False,
615         auto_open=False,
616         filename=f"{out_file_name}_in.html"
617     )
618
619     file_name = out_file_name.split(u"/")[-1]
620     if u"vpp" in out_file_name:
621         path = u"_tmp/src/vpp_performance_tests/comparisons/"
622     else:
623         path = u"_tmp/src/dpdk_performance_tests/comparisons/"
624     with open(f"{path}{file_name}.rst", u"wt") as rst_file:
625         rst_file.write(
626             u"\n"
627             u".. |br| raw:: html\n\n    <br />\n\n\n"
628             u".. |prein| raw:: html\n\n    <pre>\n\n\n"
629             u".. |preout| raw:: html\n\n    </pre>\n\n"
630         )
631         rst_file.write(
632             u".. raw:: html\n\n"
633             f'    <iframe frameborder="0" scrolling="no" '
634             f'width="1600" height="1200" '
635             f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
636             f'</iframe>\n\n'
637         )
638         if legend:
639             rst_file.write(legend[1:].replace(u"\n", u" |br| "))
640         if footnote:
641             rst_file.write(footnote.replace(u"\n", u" |br| ")[1:])
642
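# Usage sketch (illustrative; header, data and the output path are
# hypothetical, and the _tmp/src/... directories are expected to exist in the
# real documentation build):
#
#     _tpc_generate_html_table(
#         header=[u"Test Case", u"Ref Avg", u"Cmp Avg", u"Diff"],
#         data=[[u"ethip4-ip4base-1c", 10.0, 12.0, 20.0]],
#         out_file_name=u"_build/vpp_example_comparison",
#         legend=u"\nLegend:\nDiff: Percentage change of the mean values.\n"
#     )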
643
644 def table_perf_comparison(table, input_data):
645     """Generate the table(s) with algorithm: table_perf_comparison
646     specified in the specification file.
647
648     :param table: Table to generate.
649     :param input_data: Data to process.
650     :type table: pandas.Series
651     :type input_data: InputData
652     """
653
654     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
655
656     # Transform the data
657     logging.info(
658         f"    Creating the data set for the {table.get(u'type', u'')} "
659         f"{table.get(u'title', u'')}."
660     )
661     data = input_data.filter_data(table, continue_on_error=True)
662
663     # Prepare the header of the tables
664     try:
665         header = [u"Test Case", ]
666         legend = u"\nLegend:\n"
667
668         rca_data = None
669         rca = table.get(u"rca", None)
670         if rca:
671             try:
672                 with open(rca.get(u"data-file", u""), u"r") as rca_file:
673                     rca_data = load(rca_file, Loader=FullLoader)
674                 header.insert(0, rca.get(u"title", u"RCA"))
675                 legend += (
676                     u"RCA: Reference to the Root Cause Analysis, see below.\n"
677                 )
678             except (YAMLError, IOError) as err:
679                 logging.warning(repr(err))
680
681         history = table.get(u"history", list())
682         for item in history:
683             header.extend(
684                 [
685                     f"{item[u'title']} Avg({table[u'include-tests']})",
686                     f"{item[u'title']} Stdev({table[u'include-tests']})"
687                 ]
688             )
689             legend += (
690                 f"{item[u'title']} Avg({table[u'include-tests']}): "
691                 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
692                 f"a series of runs of the listed tests executed against "
693                 f"{item[u'title']}.\n"
694                 f"{item[u'title']} Stdev({table[u'include-tests']}): "
695                 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
696                 f"computed from a series of runs of the listed tests executed "
697                 f"against {item[u'title']}.\n"
698             )
699         header.extend(
700             [
701                 f"{table[u'reference'][u'title']} "
702                 f"Avg({table[u'include-tests']})",
703                 f"{table[u'reference'][u'title']} "
704                 f"Stdev({table[u'include-tests']})",
705                 f"{table[u'compare'][u'title']} "
706                 f"Avg({table[u'include-tests']})",
707                 f"{table[u'compare'][u'title']} "
708                 f"Stdev({table[u'include-tests']})",
709                 f"Diff({table[u'reference'][u'title']},"
710                 f"{table[u'compare'][u'title']})",
711                 u"Stdev(Diff)"
712             ]
713         )
714         header_str = u";".join(header) + u"\n"
715         legend += (
716             f"{table[u'reference'][u'title']} "
717             f"Avg({table[u'include-tests']}): "
718             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
719             f"series of runs of the listed tests executed against "
720             f"{table[u'reference'][u'title']}.\n"
721             f"{table[u'reference'][u'title']} "
722             f"Stdev({table[u'include-tests']}): "
723             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
724             f"computed from a series of runs of the listed tests executed "
725             f"against {table[u'reference'][u'title']}.\n"
726             f"{table[u'compare'][u'title']} "
727             f"Avg({table[u'include-tests']}): "
728             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
729             f"series of runs of the listed tests executed against "
730             f"{table[u'compare'][u'title']}.\n"
731             f"{table[u'compare'][u'title']} "
732             f"Stdev({table[u'include-tests']}): "
733             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
734             f"computed from a series of runs of the listed tests executed "
735             f"against {table[u'compare'][u'title']}.\n"
736             f"Diff({table[u'reference'][u'title']},"
737             f"{table[u'compare'][u'title']}): "
738             f"Percentage change calculated for mean values.\n"
739             u"Stdev(Diff): "
740             u"Standard deviation of percentage change calculated for mean "
741             u"values.\n"
742             u"NT: Not Tested\n"
743         )
744     except (AttributeError, KeyError) as err:
745         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
746         return
747
748     # Prepare data to the table:
749     tbl_dict = dict()
750     for job, builds in table[u"reference"][u"data"].items():
751         for build in builds:
752             for tst_name, tst_data in data[job][str(build)].items():
753                 tst_name_mod = _tpc_modify_test_name(tst_name)
754                 if (u"across topologies" in table[u"title"].lower() or
755                         (u" 3n-" in table[u"title"].lower() and
756                          u" 2n-" in table[u"title"].lower())):
757                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
758                 if tbl_dict.get(tst_name_mod, None) is None:
759                     name = tst_data[u'name'].rsplit(u'-', 1)[0]
760                     if u"across testbeds" in table[u"title"].lower() or \
761                             u"across topologies" in table[u"title"].lower():
762                         name = _tpc_modify_displayed_test_name(name)
763                     tbl_dict[tst_name_mod] = {
764                         u"name": name,
765                         u"replace-ref": True,
766                         u"replace-cmp": True,
767                         u"ref-data": list(),
768                         u"cmp-data": list()
769                     }
770                 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
771                                  src=tst_data,
772                                  include_tests=table[u"include-tests"])
773
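    # Illustrative shape of a tbl_dict entry at this point (hypothetical key):
    #     tbl_dict[u"ethip4-ip4base-1c"] = {
    #         u"name": u"ethip4-ip4base",
    #         u"replace-ref": True,
    #         u"replace-cmp": True,
    #         u"ref-data": [...],  # NDR/PDR lower bounds, or (rate, stdev)
    #         u"cmp-data": []      # tuples when include-tests is MRR
    #     }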
774     replacement = table[u"reference"].get(u"data-replacement", None)
775     if replacement:
776         rpl_data = input_data.filter_data(
777             table, data=replacement, continue_on_error=True)
778         for job, builds in replacement.items():
779             for build in builds:
780                 for tst_name, tst_data in rpl_data[job][str(build)].items():
781                     tst_name_mod = _tpc_modify_test_name(tst_name)
782                     if (u"across topologies" in table[u"title"].lower() or
783                             (u" 3n-" in table[u"title"].lower() and
784                              u" 2n-" in table[u"title"].lower())):
785                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
786                     if tbl_dict.get(tst_name_mod, None) is None:
787                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
788                         if u"across testbeds" in table[u"title"].lower() or \
789                                 u"across topologies" in table[u"title"].lower():
790                             name = _tpc_modify_displayed_test_name(name)
791                         tbl_dict[tst_name_mod] = {
792                             u"name": name,
793                             u"replace-ref": False,
794                             u"replace-cmp": True,
795                             u"ref-data": list(),
796                             u"cmp-data": list()
797                         }
798                     if tbl_dict[tst_name_mod][u"replace-ref"]:
799                         tbl_dict[tst_name_mod][u"replace-ref"] = False
800                         tbl_dict[tst_name_mod][u"ref-data"] = list()
801
802                     _tpc_insert_data(
803                         target=tbl_dict[tst_name_mod][u"ref-data"],
804                         src=tst_data,
805                         include_tests=table[u"include-tests"]
806                     )
807
808     for job, builds in table[u"compare"][u"data"].items():
809         for build in builds:
810             for tst_name, tst_data in data[job][str(build)].items():
811                 tst_name_mod = _tpc_modify_test_name(tst_name)
812                 if (u"across topologies" in table[u"title"].lower() or
813                         (u" 3n-" in table[u"title"].lower() and
814                          u" 2n-" in table[u"title"].lower())):
815                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
816                 if tbl_dict.get(tst_name_mod, None) is None:
817                     name = tst_data[u'name'].rsplit(u'-', 1)[0]
818                     if u"across testbeds" in table[u"title"].lower() or \
819                             u"across topologies" in table[u"title"].lower():
820                         name = _tpc_modify_displayed_test_name(name)
821                     tbl_dict[tst_name_mod] = {
822                         u"name": name,
823                         u"replace-ref": False,
824                         u"replace-cmp": True,
825                         u"ref-data": list(),
826                         u"cmp-data": list()
827                     }
828                 _tpc_insert_data(
829                     target=tbl_dict[tst_name_mod][u"cmp-data"],
830                     src=tst_data,
831                     include_tests=table[u"include-tests"]
832                 )
833
834     replacement = table[u"compare"].get(u"data-replacement", None)
835     if replacement:
836         rpl_data = input_data.filter_data(
837             table, data=replacement, continue_on_error=True)
838         for job, builds in replacement.items():
839             for build in builds:
840                 for tst_name, tst_data in rpl_data[job][str(build)].items():
841                     tst_name_mod = _tpc_modify_test_name(tst_name)
842                     if (u"across topologies" in table[u"title"].lower() or
843                             (u" 3n-" in table[u"title"].lower() and
844                              u" 2n-" in table[u"title"].lower())):
845                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
846                     if tbl_dict.get(tst_name_mod, None) is None:
847                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
848                         if u"across testbeds" in table[u"title"].lower() or \
849                                 u"across topologies" in table[u"title"].lower():
850                             name = _tpc_modify_displayed_test_name(name)
851                         tbl_dict[tst_name_mod] = {
852                             u"name": name,
853                             u"replace-ref": False,
854                             u"replace-cmp": False,
855                             u"ref-data": list(),
856                             u"cmp-data": list()
857                         }
858                     if tbl_dict[tst_name_mod][u"replace-cmp"]:
859                         tbl_dict[tst_name_mod][u"replace-cmp"] = False
860                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
861
862                     _tpc_insert_data(
863                         target=tbl_dict[tst_name_mod][u"cmp-data"],
864                         src=tst_data,
865                         include_tests=table[u"include-tests"]
866                     )
867
868     for item in history:
869         for job, builds in item[u"data"].items():
870             for build in builds:
871                 for tst_name, tst_data in data[job][str(build)].items():
872                     tst_name_mod = _tpc_modify_test_name(tst_name)
873                     if (u"across topologies" in table[u"title"].lower() or
874                             (u" 3n-" in table[u"title"].lower() and
875                              u" 2n-" in table[u"title"].lower())):
876                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
877                     if tbl_dict.get(tst_name_mod, None) is None:
878                         continue
879                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
880                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
881                     if tbl_dict[tst_name_mod][u"history"].\
882                             get(item[u"title"], None) is None:
883                         tbl_dict[tst_name_mod][u"history"][item[
884                             u"title"]] = list()
885                     try:
886                         if table[u"include-tests"] == u"MRR":
887                             res = (tst_data[u"result"][u"receive-rate"],
888                                    tst_data[u"result"][u"receive-stdev"])
889                         elif table[u"include-tests"] == u"PDR":
890                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
891                         elif table[u"include-tests"] == u"NDR":
892                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
893                         else:
894                             continue
895                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
896                             append(res)
897                     except (TypeError, KeyError):
898                         pass
899
900     tbl_lst = list()
901     for tst_name in tbl_dict:
902         item = [tbl_dict[tst_name][u"name"], ]
903         if history:
904             if tbl_dict[tst_name].get(u"history", None) is not None:
905                 for hist_data in tbl_dict[tst_name][u"history"].values():
906                     if hist_data:
907                         if table[u"include-tests"] == u"MRR":
908                             item.append(round(hist_data[0][0] / 1e6, 1))
909                             item.append(round(hist_data[0][1] / 1e6, 1))
910                         else:
911                             item.append(round(mean(hist_data) / 1e6, 1))
912                             item.append(round(stdev(hist_data) / 1e6, 1))
913                     else:
914                         item.extend([u"NT", u"NT"])
915             else:
916                 item.extend([u"NT", u"NT"])
917         data_r = tbl_dict[tst_name][u"ref-data"]
918         if data_r:
919             if table[u"include-tests"] == u"MRR":
920                 data_r_mean = data_r[0][0]
921                 data_r_stdev = data_r[0][1]
922             else:
923                 data_r_mean = mean(data_r)
924                 data_r_stdev = stdev(data_r)
925             item.append(round(data_r_mean / 1e6, 1))
926             item.append(round(data_r_stdev / 1e6, 1))
927         else:
928             data_r_mean = None
929             data_r_stdev = None
930             item.extend([u"NT", u"NT"])
931         data_c = tbl_dict[tst_name][u"cmp-data"]
932         if data_c:
933             if table[u"include-tests"] == u"MRR":
934                 data_c_mean = data_c[0][0]
935                 data_c_stdev = data_c[0][1]
936             else:
937                 data_c_mean = mean(data_c)
938                 data_c_stdev = stdev(data_c)
939             item.append(round(data_c_mean / 1e6, 1))
940             item.append(round(data_c_stdev / 1e6, 1))
941         else:
942             data_c_mean = None
943             data_c_stdev = None
944             item.extend([u"NT", u"NT"])
945         if item[-2] == u"NT":
946             pass
947         elif item[-4] == u"NT":
948             item.append(u"New in CSIT-2001")
949             item.append(u"New in CSIT-2001")
950         elif data_r_mean is not None and data_c_mean is not None:
951             delta, d_stdev = relative_change_stdev(
952                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
953             )
954             try:
955                 item.append(round(delta))
956             except ValueError:
957                 item.append(delta)
958             try:
959                 item.append(round(d_stdev))
960             except ValueError:
961                 item.append(d_stdev)
962         if rca_data:
963             rca_nr = rca_data.get(item[0], u"-")
964             item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
965         if (len(item) == len(header)) and (item[-4] != u"NT"):
966             tbl_lst.append(item)
967
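    # Each accepted row now ends with ref-avg, ref-stdev, cmp-avg, cmp-stdev,
    # delta and stdev(diff) (or the u"New in CSIT-2001" markers), optionally
    # prefixed with the RCA reference, matching the header built above.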
968     tbl_lst = _tpc_sort_table(tbl_lst)
969
970     # Generate csv tables:
971     csv_file = f"{table[u'output-file']}.csv"
972     with open(csv_file, u"wt") as file_handler:
973         file_handler.write(header_str)
974         for test in tbl_lst:
975             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
976
977     txt_file_name = f"{table[u'output-file']}.txt"
978     convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
979
980     footnote = u""
981     with open(txt_file_name, u'a') as txt_file:
982         txt_file.write(legend)
983         if rca_data:
984             footnote = rca_data.get(u"footnote", u"")
985             if footnote:
986                 txt_file.write(footnote)
987         txt_file.write(u":END")
988
989     # Generate html table:
990     _tpc_generate_html_table(
991         header,
992         tbl_lst,
993         table[u'output-file'],
994         legend=legend,
995         footnote=footnote
996     )
997
998
999 def table_perf_comparison_nic(table, input_data):
1000     """Generate the table(s) with algorithm: table_perf_comparison_nic
1001     specified in the specification file.
1002
1003     :param table: Table to generate.
1004     :param input_data: Data to process.
1005     :type table: pandas.Series
1006     :type input_data: InputData
1007     """
1008
1009     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1010
1011     # Transform the data
1012     logging.info(
1013         f"    Creating the data set for the {table.get(u'type', u'')} "
1014         f"{table.get(u'title', u'')}."
1015     )
1016     data = input_data.filter_data(table, continue_on_error=True)
1017
1018     # Prepare the header of the tables
1019     try:
1020         header = [u"Test Case", ]
1021         legend = u"\nLegend:\n"
1022
1023         rca_data = None
1024         rca = table.get(u"rca", None)
1025         if rca:
1026             try:
1027                 with open(rca.get(u"data-file", u""), u"r") as rca_file:
1028                     rca_data = load(rca_file, Loader=FullLoader)
1029                 header.insert(0, rca.get(u"title", u"RCA"))
1030                 legend += (
1031                     u"RCA: Reference to the Root Cause Analysis, see below.\n"
1032                 )
1033             except (YAMLError, IOError) as err:
1034                 logging.warning(repr(err))
1035
1036         history = table.get(u"history", list())
1037         for item in history:
1038             header.extend(
1039                 [
1040                     f"{item[u'title']} Avg({table[u'include-tests']})",
1041                     f"{item[u'title']} Stdev({table[u'include-tests']})"
1042                 ]
1043             )
1044             legend += (
1045                 f"{item[u'title']} Avg({table[u'include-tests']}): "
1046                 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
1047                 f"a series of runs of the listed tests executed against "
1048                 f"{item[u'title']}.\n"
1049                 f"{item[u'title']} Stdev({table[u'include-tests']}): "
1050                 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1051                 f"computed from a series of runs of the listed tests executed "
1052                 f"against {item[u'title']}.\n"
1053             )
1054         header.extend(
1055             [
1056                 f"{table[u'reference'][u'title']} "
1057                 f"Avg({table[u'include-tests']})",
1058                 f"{table[u'reference'][u'title']} "
1059                 f"Stdev({table[u'include-tests']})",
1060                 f"{table[u'compare'][u'title']} "
1061                 f"Avg({table[u'include-tests']})",
1062                 f"{table[u'compare'][u'title']} "
1063                 f"Stdev({table[u'include-tests']})",
1064                 f"Diff({table[u'reference'][u'title']},"
1065                 f"{table[u'compare'][u'title']})",
1066                 u"Stdev(Diff)"
1067             ]
1068         )
1069         header_str = u";".join(header) + u"\n"
1070         legend += (
1071             f"{table[u'reference'][u'title']} "
1072             f"Avg({table[u'include-tests']}): "
1073             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1074             f"series of runs of the listed tests executed against "
1075             f"{table[u'reference'][u'title']}.\n"
1076             f"{table[u'reference'][u'title']} "
1077             f"Stdev({table[u'include-tests']}): "
1078             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1079             f"computed from a series of runs of the listed tests executed "
1080             f"against {table[u'reference'][u'title']}.\n"
1081             f"{table[u'compare'][u'title']} "
1082             f"Avg({table[u'include-tests']}): "
1083             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1084             f"series of runs of the listed tests executed against "
1085             f"{table[u'compare'][u'title']}.\n"
1086             f"{table[u'compare'][u'title']} "
1087             f"Stdev({table[u'include-tests']}): "
1088             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1089             f"computed from a series of runs of the listed tests executed "
1090             f"against {table[u'compare'][u'title']}.\n"
1091             f"Diff({table[u'reference'][u'title']},"
1092             f"{table[u'compare'][u'title']}): "
1093             f"Percentage change calculated for mean values.\n"
1094             u"Stdev(Diff): "
1095             u"Standard deviation of percentage change calculated for mean "
1096             u"values.\n"
1097             u"NT: Not Tested\n"
1098         )
1099     except (AttributeError, KeyError) as err:
1100         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1101         return
1102
1103     # Prepare data to the table:
1104     tbl_dict = dict()
1105     for job, builds in table[u"reference"][u"data"].items():
1106         for build in builds:
1107             for tst_name, tst_data in data[job][str(build)].items():
1108                 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1109                     continue
1110                 tst_name_mod = _tpc_modify_test_name(tst_name)
1111                 if (u"across topologies" in table[u"title"].lower() or
1112                         (u" 3n-" in table[u"title"].lower() and
1113                          u" 2n-" in table[u"title"].lower())):
1114                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1115                 if tbl_dict.get(tst_name_mod, None) is None:
1116                     name = tst_data[u'name'].rsplit(u'-', 1)[0]
1117                     if u"across testbeds" in table[u"title"].lower() or \
1118                             u"across topologies" in table[u"title"].lower():
1119                         name = _tpc_modify_displayed_test_name(name)
1120                     tbl_dict[tst_name_mod] = {
1121                         u"name": name,
1122                         u"replace-ref": True,
1123                         u"replace-cmp": True,
1124                         u"ref-data": list(),
1125                         u"cmp-data": list()
1126                     }
1127                 _tpc_insert_data(
1128                     target=tbl_dict[tst_name_mod][u"ref-data"],
1129                     src=tst_data,
1130                     include_tests=table[u"include-tests"]
1131                 )
1132
1133     replacement = table[u"reference"].get(u"data-replacement", None)
1134     if replacement:
1135         rpl_data = input_data.filter_data(
1136             table, data=replacement, continue_on_error=True)
1137         for job, builds in replacement.items():
1138             for build in builds:
1139                 for tst_name, tst_data in rpl_data[job][str(build)].items():
1140                     if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1141                         continue
1142                     tst_name_mod = _tpc_modify_test_name(tst_name)
1143                     if (u"across topologies" in table[u"title"].lower() or
1144                             (u" 3n-" in table[u"title"].lower() and
1145                              u" 2n-" in table[u"title"].lower())):
1146                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1147                     if tbl_dict.get(tst_name_mod, None) is None:
1148                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
1149                         if u"across testbeds" in table[u"title"].lower() or \
1150                                 u"across topologies" in table[u"title"].lower():
1151                             name = _tpc_modify_displayed_test_name(name)
1152                         tbl_dict[tst_name_mod] = {
1153                             u"name": name,
1154                             u"replace-ref": False,
1155                             u"replace-cmp": True,
1156                             u"ref-data": list(),
1157                             u"cmp-data": list()
1158                         }
1159                     if tbl_dict[tst_name_mod][u"replace-ref"]:
1160                         tbl_dict[tst_name_mod][u"replace-ref"] = False
1161                         tbl_dict[tst_name_mod][u"ref-data"] = list()
1162
1163                     _tpc_insert_data(
1164                         target=tbl_dict[tst_name_mod][u"ref-data"],
1165                         src=tst_data,
1166                         include_tests=table[u"include-tests"]
1167                     )
1168
1169     for job, builds in table[u"compare"][u"data"].items():
1170         for build in builds:
1171             for tst_name, tst_data in data[job][str(build)].items():
1172                 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1173                     continue
1174                 tst_name_mod = _tpc_modify_test_name(tst_name)
1175                 if (u"across topologies" in table[u"title"].lower() or
1176                         (u" 3n-" in table[u"title"].lower() and
1177                          u" 2n-" in table[u"title"].lower())):
1178                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1179                 if tbl_dict.get(tst_name_mod, None) is None:
1180                     name = tst_data[u'name'].rsplit(u'-', 1)[0]
1181                     if u"across testbeds" in table[u"title"].lower() or \
1182                             u"across topologies" in table[u"title"].lower():
1183                         name = _tpc_modify_displayed_test_name(name)
1184                     tbl_dict[tst_name_mod] = {
1185                         u"name": name,
1186                         u"replace-ref": False,
1187                         u"replace-cmp": True,
1188                         u"ref-data": list(),
1189                         u"cmp-data": list()
1190                     }
1191                 _tpc_insert_data(
1192                     target=tbl_dict[tst_name_mod][u"cmp-data"],
1193                     src=tst_data,
1194                     include_tests=table[u"include-tests"]
1195                 )
1196
1197     replacement = table[u"compare"].get(u"data-replacement", None)
1198     if replacement:
1199         rpl_data = input_data.filter_data(
1200             table, data=replacement, continue_on_error=True)
1201         for job, builds in replacement.items():
1202             for build in builds:
1203                 for tst_name, tst_data in rpl_data[job][str(build)].items():
1204                     if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1205                         continue
1206                     tst_name_mod = _tpc_modify_test_name(tst_name)
1207                     if (u"across topologies" in table[u"title"].lower() or
1208                             (u" 3n-" in table[u"title"].lower() and
1209                              u" 2n-" in table[u"title"].lower())):
1210                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1211                     if tbl_dict.get(tst_name_mod, None) is None:
1212                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
1213                         if u"across testbeds" in table[u"title"].lower() or \
1214                                 u"across topologies" in table[u"title"].lower():
1215                             name = _tpc_modify_displayed_test_name(name)
1216                         tbl_dict[tst_name_mod] = {
1217                             u"name": name,
1218                             u"replace-ref": False,
1219                             u"replace-cmp": False,
1220                             u"ref-data": list(),
1221                             u"cmp-data": list()
1222                         }
1223                     if tbl_dict[tst_name_mod][u"replace-cmp"]:
1224                         tbl_dict[tst_name_mod][u"replace-cmp"] = False
1225                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
1226
1227                     _tpc_insert_data(
1228                         target=tbl_dict[tst_name_mod][u"cmp-data"],
1229                         src=tst_data,
1230                         include_tests=table[u"include-tests"]
1231                     )
1232
1233     for item in history:
1234         for job, builds in item[u"data"].items():
1235             for build in builds:
1236                 for tst_name, tst_data in data[job][str(build)].items():
1237                     if item[u"nic"] not in tst_data[u"tags"]:
1238                         continue
1239                     tst_name_mod = _tpc_modify_test_name(tst_name)
1240                     if (u"across topologies" in table[u"title"].lower() or
1241                             (u" 3n-" in table[u"title"].lower() and
1242                              u" 2n-" in table[u"title"].lower())):
1243                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1244                     if tbl_dict.get(tst_name_mod, None) is None:
1245                         continue
1246                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
1247                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1248                     if tbl_dict[tst_name_mod][u"history"].\
1249                             get(item[u"title"], None) is None:
1250                         tbl_dict[tst_name_mod][u"history"][item[
1251                             u"title"]] = list()
1252                     try:
1253                         if table[u"include-tests"] == u"MRR":
1254                             res = (tst_data[u"result"][u"receive-rate"],
1255                                    tst_data[u"result"][u"receive-stdev"])
1256                         elif table[u"include-tests"] == u"PDR":
1257                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1258                         elif table[u"include-tests"] == u"NDR":
1259                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1260                         else:
1261                             continue
1262                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1263                             append(res)
1264                     except (TypeError, KeyError):
1265                         pass
1266
1267     tbl_lst = list()
1268     for tst_name in tbl_dict:
1269         item = [tbl_dict[tst_name][u"name"], ]
1270         if history:
1271             if tbl_dict[tst_name].get(u"history", None) is not None:
1272                 for hist_data in tbl_dict[tst_name][u"history"].values():
1273                     if hist_data:
1274                         if table[u"include-tests"] == u"MRR":
1275                             item.append(round(hist_data[0][0] / 1e6, 1))
1276                             item.append(round(hist_data[0][1] / 1e6, 1))
1277                         else:
1278                             item.append(round(mean(hist_data) / 1e6, 1))
1279                             item.append(round(stdev(hist_data) / 1e6, 1))
1280                     else:
1281                         item.extend([u"NT", u"NT"])
1282             else:
1283                 item.extend([u"NT", u"NT"])
1284         data_r = tbl_dict[tst_name][u"ref-data"]
1285         if data_r:
1286             if table[u"include-tests"] == u"MRR":
1287                 data_r_mean = data_r[0][0]
1288                 data_r_stdev = data_r[0][1]
1289             else:
1290                 data_r_mean = mean(data_r)
1291                 data_r_stdev = stdev(data_r)
1292             item.append(round(data_r_mean / 1e6, 1))
1293             item.append(round(data_r_stdev / 1e6, 1))
1294         else:
1295             data_r_mean = None
1296             data_r_stdev = None
1297             item.extend([u"NT", u"NT"])
1298         data_c = tbl_dict[tst_name][u"cmp-data"]
1299         if data_c:
1300             if table[u"include-tests"] == u"MRR":
1301                 data_c_mean = data_c[0][0]
1302                 data_c_stdev = data_c[0][1]
1303             else:
1304                 data_c_mean = mean(data_c)
1305                 data_c_stdev = stdev(data_c)
1306             item.append(round(data_c_mean / 1e6, 1))
1307             item.append(round(data_c_stdev / 1e6, 1))
1308         else:
1309             data_c_mean = None
1310             data_c_stdev = None
1311             item.extend([u"NT", u"NT"])
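             # Mark the row: if the compare data are missing, the row is
             # dropped later; if only the reference data are missing, the test
             # is marked as new; otherwise the relative change and its stdev
             # are computed: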
1312         if item[-2] == u"NT":
1313             pass
1314         elif item[-4] == u"NT":
1315             item.append(u"New in CSIT-2001")
1316             item.append(u"New in CSIT-2001")
1317         elif data_r_mean is not None and data_c_mean is not None:
1318             delta, d_stdev = relative_change_stdev(
1319                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1320             )
1321             try:
1322                 item.append(round(delta))
1323             except ValueError:
1324                 item.append(delta)
1325             try:
1326                 item.append(round(d_stdev))
1327             except ValueError:
1328                 item.append(d_stdev)
1329         if rca_data:
1330             rca_nr = rca_data.get(item[0], u"-")
1331             item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1332         if (len(item) == len(header)) and (item[-4] != u"NT"):
1333             tbl_lst.append(item)
1334
1335     tbl_lst = _tpc_sort_table(tbl_lst)
1336
1337     # Generate csv tables:
1338     csv_file = f"{table[u'output-file']}.csv"
1339     with open(csv_file, u"wt") as file_handler:
1340         file_handler.write(header_str)
1341         for test in tbl_lst:
1342             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1343
1344     txt_file_name = f"{table[u'output-file']}.txt"
1345     convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
1346
1347     footnote = u""
1348     with open(txt_file_name, u'a') as txt_file:
1349         txt_file.write(legend)
1350         if rca_data:
1351             footnote = rca_data.get(u"footnote", u"")
1352             if footnote:
1353                 txt_file.write(footnote)
1354         txt_file.write(u":END")
1355
1356     # Generate html table:
1357     _tpc_generate_html_table(
1358         header,
1359         tbl_lst,
1360         table[u'output-file'],
1361         legend=legend,
1362         footnote=footnote
1363     )
1364
1365
1366 def table_nics_comparison(table, input_data):
1367     """Generate the table(s) with algorithm: table_nics_comparison
1368     specified in the specification file.
1369
1370     :param table: Table to generate.
1371     :param input_data: Data to process.
1372     :type table: pandas.Series
1373     :type input_data: InputData
1374     """
1375
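         # A minimal, purely illustrative sketch of the specification entry
         # this algorithm expects; only the keys read below are real, the
         # values are hypothetical placeholders:
         #
         #   algorithm: "table_nics_comparison"
         #   title: "Performance comparison of NICs"
         #   include-tests: "NDR"                  # or "PDR" / "MRR"
         #   reference: {title: "x710", nic: "NIC_Intel-X710"}
         #   compare: {title: "xxv710", nic: "NIC_Intel-XXV710"}
         #   data: ...                             # jobs and builds to process
         #   output-file: ...                      # path without extension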
1376     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1377
1378     # Transform the data
1379     logging.info(
1380         f"    Creating the data set for the {table.get(u'type', u'')} "
1381         f"{table.get(u'title', u'')}."
1382     )
1383     data = input_data.filter_data(table, continue_on_error=True)
1384
1385     # Prepare the header of the tables
1386     try:
1387         header = [
1388             u"Test Case",
1389             f"{table[u'reference'][u'title']} "
1390             f"Avg({table[u'include-tests']})",
1391             f"{table[u'reference'][u'title']} "
1392             f"Stdev({table[u'include-tests']})",
1393             f"{table[u'compare'][u'title']} "
1394             f"Avg({table[u'include-tests']})",
1395             f"{table[u'compare'][u'title']} "
1396             f"Stdev({table[u'include-tests']})",
1397             f"Diff({table[u'reference'][u'title']},"
1398             f"{table[u'compare'][u'title']})",
1399             u"Stdev(Diff)"
1400         ]
1401         legend = (
1402             u"\nLegend:\n"
1403             f"{table[u'reference'][u'title']} "
1404             f"Avg({table[u'include-tests']}): "
1405             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1406             f"series of runs of the listed tests executed using "
1407             f"{table[u'reference'][u'title']} NIC.\n"
1408             f"{table[u'reference'][u'title']} "
1409             f"Stdev({table[u'include-tests']}): "
1410             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1411             f"computed from a series of runs of the listed tests executed "
1412             f"using {table[u'reference'][u'title']} NIC.\n"
1413             f"{table[u'compare'][u'title']} "
1414             f"Avg({table[u'include-tests']}): "
1415             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1416             f"series of runs of the listed tests executed using "
1417             f"{table[u'compare'][u'title']} NIC.\n"
1418             f"{table[u'compare'][u'title']} "
1419             f"Stdev({table[u'include-tests']}): "
1420             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1421             f"computed from a series of runs of the listed tests executed "
1422             f"using {table[u'compare'][u'title']} NIC.\n"
1423             f"Diff({table[u'reference'][u'title']},"
1424             f"{table[u'compare'][u'title']}): "
1425             f"Percentage change calculated for mean values.\n"
1426             u"Stdev(Diff): "
1427             u"Standard deviation of percentage change calculated for mean "
1428             u"values.\n"
1429             u":END"
1430         )
1431
1432     except (AttributeError, KeyError) as err:
1433         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1434         return
1435
1436     # Prepare data for the table:
1437     tbl_dict = dict()
1438     for job, builds in table[u"data"].items():
1439         for build in builds:
1440             for tst_name, tst_data in data[job][str(build)].items():
1441                 tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
1442                 if tbl_dict.get(tst_name_mod, None) is None:
1443                     name = tst_data[u'name'].rsplit(u'-', 1)[0]
1444                     tbl_dict[tst_name_mod] = {
1445                         u"name": name,
1446                         u"ref-data": list(),
1447                         u"cmp-data": list()
1448                     }
1449                 try:
1450                     if table[u"include-tests"] == u"MRR":
1451                         result = (tst_data[u"result"][u"receive-rate"],
1452                                   tst_data[u"result"][u"receive-stdev"])
1453                     elif table[u"include-tests"] == u"PDR":
1454                         result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1455                     elif table[u"include-tests"] == u"NDR":
1456                         result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1457                     else:
1458                         continue
1459
1460                     if result and \
1461                             table[u"reference"][u"nic"] in tst_data[u"tags"]:
1462                         tbl_dict[tst_name_mod][u"ref-data"].append(result)
1463                     elif result and \
1464                             table[u"compare"][u"nic"] in tst_data[u"tags"]:
1465                         tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1466                 except (TypeError, KeyError) as err:
1467                     logging.debug(f"No data for {tst_name}\n{repr(err)}")
1468                     # No data in output.xml for this test
1469
1470     tbl_lst = list()
1471     for tst_name in tbl_dict:
1472         item = [tbl_dict[tst_name][u"name"], ]
1473         data_r = tbl_dict[tst_name][u"ref-data"]
1474         if data_r:
1475             if table[u"include-tests"] == u"MRR":
1476                 data_r_mean = data_r[0][0]
1477                 data_r_stdev = data_r[0][1]
1478             else:
1479                 data_r_mean = mean(data_r)
1480                 data_r_stdev = stdev(data_r)
1481             item.append(round(data_r_mean / 1e6, 1))
1482             item.append(round(data_r_stdev / 1e6, 1))
1483         else:
1484             data_r_mean = None
1485             data_r_stdev = None
1486             item.extend([None, None])
1487         data_c = tbl_dict[tst_name][u"cmp-data"]
1488         if data_c:
1489             if table[u"include-tests"] == u"MRR":
1490                 data_c_mean = data_c[0][0]
1491                 data_c_stdev = data_c[0][1]
1492             else:
1493                 data_c_mean = mean(data_c)
1494                 data_c_stdev = stdev(data_c)
1495             item.append(round(data_c_mean / 1e6, 1))
1496             item.append(round(data_c_stdev / 1e6, 1))
1497         else:
1498             data_c_mean = None
1499             data_c_stdev = None
1500             item.extend([None, None])
1501         if data_r_mean is not None and data_c_mean is not None:
1502             delta, d_stdev = relative_change_stdev(
1503                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1504             )
1505             try:
1506                 item.append(round(delta))
1507             except ValueError:
1508                 item.append(delta)
1509             try:
1510                 item.append(round(d_stdev))
1511             except ValueError:
1512                 item.append(d_stdev)
1513             tbl_lst.append(item)
1514
1515     # Sort the table according to the relative change
1516     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1517
1518     # Generate csv tables:
1519     with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1520         file_handler.write(u";".join(header) + u"\n")
1521         for test in tbl_lst:
1522             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1523
1524     convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1525                               f"{table[u'output-file']}.txt",
1526                               delimiter=u";")
1527
1528     with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1529         txt_file.write(legend)
1530
1531     # Generate html table:
1532     _tpc_generate_html_table(
1533         header,
1534         tbl_lst,
1535         table[u'output-file'],
1536         legend=legend
1537     )
1538
1539
1540 def table_soak_vs_ndr(table, input_data):
1541     """Generate the table(s) with algorithm: table_soak_vs_ndr
1542     specified in the specification file.
1543
1544     :param table: Table to generate.
1545     :param input_data: Data to process.
1546     :type table: pandas.Series
1547     :type input_data: InputData
1548     """
1549
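         # Here the u"compare" part of the specification points to SOAK test
         # runs and the u"reference" part to the corresponding NDR/PDR/MRR
         # runs; a minimal, illustrative sketch (values are placeholders):
         #
         #   algorithm: "table_soak_vs_ndr"
         #   include-tests: "NDR"
         #   reference: {title: "NDR", data: ...}  # jobs/builds with ndrpdr
         #   compare: {title: "SOAK", data: ...}   # jobs/builds with soak
         #   output-file: ...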
1550     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1551
1552     # Transform the data
1553     logging.info(
1554         f"    Creating the data set for the {table.get(u'type', u'')} "
1555         f"{table.get(u'title', u'')}."
1556     )
1557     data = input_data.filter_data(table, continue_on_error=True)
1558
1559     # Prepare the header of the table
1560     try:
1561         header = [
1562             u"Test Case",
1563             f"Avg({table[u'reference'][u'title']})",
1564             f"Stdev({table[u'reference'][u'title']})",
1565             f"Avg({table[u'compare'][u'title']})",
1566             f"Stdev({table[u'compare'][u'title']})",
1567             u"Diff",
1568             u"Stdev(Diff)"
1569         ]
1570         header_str = u";".join(header) + u"\n"
1571         legend = (
1572             u"\nLegend:\n"
1573             f"Avg({table[u'reference'][u'title']}): "
1574             f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
1575             f"from a series of runs of the listed tests.\n"
1576             f"Stdev({table[u'reference'][u'title']}): "
1577             f"Standard deviation value of {table[u'reference'][u'title']} "
1578             f"[Mpps] computed from a series of runs of the listed tests.\n"
1579             f"Avg({table[u'compare'][u'title']}): "
1580             f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
1581             f"a series of runs of the listed tests.\n"
1582             f"Stdev({table[u'compare'][u'title']}): "
1583             f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
1584             f"computed from a series of runs of the listed tests.\n"
1585             f"Diff({table[u'reference'][u'title']},"
1586             f"{table[u'compare'][u'title']}): "
1587             f"Percentage change calculated for mean values.\n"
1588             u"Stdev(Diff): "
1589             u"Standard deviation of percentage change calculated for mean "
1590             u"values.\n"
1591             u":END"
1592         )
1593     except (AttributeError, KeyError) as err:
1594         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1595         return
1596
1597     # Create a list of available SOAK test results:
1598     tbl_dict = dict()
1599     for job, builds in table[u"compare"][u"data"].items():
1600         for build in builds:
1601             for tst_name, tst_data in data[job][str(build)].items():
1602                 if tst_data[u"type"] == u"SOAK":
1603                     tst_name_mod = tst_name.replace(u"-soak", u"")
1604                     if tbl_dict.get(tst_name_mod, None) is None:
1605                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
1606                         nic = groups.group(0) if groups else u""
1607                         name = (
1608                             f"{nic}-"
1609                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1610                         )
1611                         tbl_dict[tst_name_mod] = {
1612                             u"name": name,
1613                             u"ref-data": list(),
1614                             u"cmp-data": list()
1615                         }
1616                     try:
1617                         tbl_dict[tst_name_mod][u"cmp-data"].append(
1618                             tst_data[u"throughput"][u"LOWER"])
1619                     except (KeyError, TypeError):
1620                         pass
1621     tests_lst = tbl_dict.keys()
1622
1623     # Add corresponding NDR test results:
1624     for job, builds in table[u"reference"][u"data"].items():
1625         for build in builds:
1626             for tst_name, tst_data in data[job][str(build)].items():
1627                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1628                     replace(u"-mrr", u"")
1629                 if tst_name_mod not in tests_lst:
1630                     continue
1631                 try:
1632                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1633                         continue
1634                     if table[u"include-tests"] == u"MRR":
1635                         result = (tst_data[u"result"][u"receive-rate"],
1636                                   tst_data[u"result"][u"receive-stdev"])
1637                     elif table[u"include-tests"] == u"PDR":
1638                         result = \
1639                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
1640                     elif table[u"include-tests"] == u"NDR":
1641                         result = \
1642                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
1643                     else:
1644                         result = None
1645                     if result is not None:
1646                         tbl_dict[tst_name_mod][u"ref-data"].append(
1647                             result)
1648                 except (KeyError, TypeError):
1649                     continue
1650
1651     tbl_lst = list()
1652     for tst_name in tbl_dict:
1653         item = [tbl_dict[tst_name][u"name"], ]
1654         data_r = tbl_dict[tst_name][u"ref-data"]
1655         if data_r:
1656             if table[u"include-tests"] == u"MRR":
1657                 data_r_mean = data_r[0][0]
1658                 data_r_stdev = data_r[0][1]
1659             else:
1660                 data_r_mean = mean(data_r)
1661                 data_r_stdev = stdev(data_r)
1662             item.append(round(data_r_mean / 1e6, 1))
1663             item.append(round(data_r_stdev / 1e6, 1))
1664         else:
1665             data_r_mean = None
1666             data_r_stdev = None
1667             item.extend([None, None])
1668         data_c = tbl_dict[tst_name][u"cmp-data"]
1669         if data_c:
1670             if table[u"include-tests"] == u"MRR":
1671                 data_c_mean = data_c[0][0]
1672                 data_c_stdev = data_c[0][1]
1673             else:
1674                 data_c_mean = mean(data_c)
1675                 data_c_stdev = stdev(data_c)
1676             item.append(round(data_c_mean / 1e6, 1))
1677             item.append(round(data_c_stdev / 1e6, 1))
1678         else:
1679             data_c_mean = None
1680             data_c_stdev = None
1681             item.extend([None, None])
1682         if data_r_mean is not None and data_c_mean is not None:
1683             delta, d_stdev = relative_change_stdev(
1684                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1685             try:
1686                 item.append(round(delta))
1687             except ValueError:
1688                 item.append(delta)
1689             try:
1690                 item.append(round(d_stdev))
1691             except ValueError:
1692                 item.append(d_stdev)
1693             tbl_lst.append(item)
1694
1695     # Sort the table according to the relative change
1696     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1697
1698     # Generate csv tables:
1699     csv_file = f"{table[u'output-file']}.csv"
1700     with open(csv_file, u"wt") as file_handler:
1701         file_handler.write(header_str)
1702         for test in tbl_lst:
1703             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1704
1705     convert_csv_to_pretty_txt(
1706         csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
1707     )
1708     with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1709         txt_file.write(legend)
1710
1711     # Generate html table:
1712     _tpc_generate_html_table(
1713         header,
1714         tbl_lst,
1715         table[u'output-file'],
1716         legend=legend
1717     )
1718
1719
1720 def table_perf_trending_dash(table, input_data):
1721     """Generate the table(s) with algorithm:
1722     table_perf_trending_dash
1723     specified in the specification file.
1724
1725     :param table: Table to generate.
1726     :param input_data: Data to process.
1727     :type table: pandas.Series
1728     :type input_data: InputData
1729     """
1730
1731     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1732
1733     # Transform the data
1734     logging.info(
1735         f"    Creating the data set for the {table.get(u'type', u'')} "
1736         f"{table.get(u'title', u'')}."
1737     )
1738     data = input_data.filter_data(table, continue_on_error=True)
1739
1740     # Prepare the header of the tables
1741     header = [
1742         u"Test Case",
1743         u"Trend [Mpps]",
1744         u"Short-Term Change [%]",
1745         u"Long-Term Change [%]",
1746         u"Regressions [#]",
1747         u"Progressions [#]"
1748     ]
1749     header_str = u",".join(header) + u"\n"
1750
1751     # Prepare data for the table:
1752     tbl_dict = dict()
1753     for job, builds in table[u"data"].items():
1754         for build in builds:
1755             for tst_name, tst_data in data[job][str(build)].items():
1756                 if tst_name.lower() in table.get(u"ignore-list", list()):
1757                     continue
1758                 if tbl_dict.get(tst_name, None) is None:
1759                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1760                     if not groups:
1761                         continue
1762                     nic = groups.group(0)
1763                     tbl_dict[tst_name] = {
1764                         u"name": f"{nic}-{tst_data[u'name']}",
1765                         u"data": OrderedDict()
1766                     }
1767                 try:
1768                     tbl_dict[tst_name][u"data"][str(build)] = \
1769                         tst_data[u"result"][u"receive-rate"]
1770                 except (TypeError, KeyError):
1771                     pass  # No data in output.xml for this test
1772
1773     tbl_lst = list()
1774     for tst_name in tbl_dict:
1775         data_t = tbl_dict[tst_name][u"data"]
1776         if len(data_t) < 2:
1777             continue
1778
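             # classify_anomalies() returns a per-sample classification (e.g.
             # u"regression" / u"progression") and the list of trend averages;
             # the short-term change compares the last average with the one
             # from u"window" samples ago, the long-term change compares it
             # with the maximum average seen earlier in the long-term window: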
1779         classification_lst, avgs = classify_anomalies(data_t)
1780
1781         win_size = min(len(data_t), table[u"window"])
1782         long_win_size = min(len(data_t), table[u"long-trend-window"])
1783
1784         try:
1785             max_long_avg = max(
1786                 [x for x in avgs[-long_win_size:-win_size]
1787                  if not isnan(x)])
1788         except ValueError:
1789             max_long_avg = nan
1790         last_avg = avgs[-1]
1791         avg_week_ago = avgs[max(-win_size, -len(avgs))]
1792
1793         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1794             rel_change_last = nan
1795         else:
1796             rel_change_last = round(
1797                 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
1798
1799         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1800             rel_change_long = nan
1801         else:
1802             rel_change_long = round(
1803                 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
1804
1805         if classification_lst:
1806             if isnan(rel_change_last) and isnan(rel_change_long):
1807                 continue
1808             if isnan(last_avg) or isnan(rel_change_last) or \
1809                     isnan(rel_change_long):
1810                 continue
1811             tbl_lst.append(
1812                 [tbl_dict[tst_name][u"name"],
1813                  round(last_avg / 1e6, 2),
1814                  rel_change_last,
1815                  rel_change_long,
1816                  classification_lst[-win_size:].count(u"regression"),
1817                  classification_lst[-win_size:].count(u"progression")])
1818
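         # Sort the dashboard: rows are grouped by the number of regressions
         # (descending), then by the number of progressions (descending), and
         # within each group ordered by the short-term change (ascending);
         # the initial sort by name keeps ties in alphabetical order: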
1819     tbl_lst.sort(key=lambda rel: rel[0])
1820
1821     tbl_sorted = list()
1822     for nrr in range(table[u"window"], -1, -1):
1823         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1824         for nrp in range(table[u"window"], -1, -1):
1825             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1826             tbl_out.sort(key=lambda rel: rel[2])
1827             tbl_sorted.extend(tbl_out)
1828
1829     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1830
1831     logging.info(f"    Writing file: {file_name}")
1832     with open(file_name, u"wt") as file_handler:
1833         file_handler.write(header_str)
1834         for test in tbl_sorted:
1835             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1836
1837     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1838     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1839
1840
1841 def _generate_url(testbed, test_name):
1842     """Generate URL to a trending plot from the name of the test case.
1843
1844     :param testbed: The testbed used for testing.
1845     :param test_name: The name of the test case.
1846     :type testbed: str
1847     :type test_name: str
1848     :returns: The URL to the plot with the trending data for the given test
1849         case.
1850     :rtype: str
1851     """
1852
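         # Example (hypothetical test name): for testbed u"2n-skx" and test
         # name u"xxv710-64b-2t1c-avf-ethip4-ip4base-ndrpdr" this function
         # returns u"ip4-2n-skx-xxv710.html#64b-2t1c-base-avf".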
1853     if u"x520" in test_name:
1854         nic = u"x520"
1855     elif u"x710" in test_name:
1856         nic = u"x710"
1857     elif u"xl710" in test_name:
1858         nic = u"xl710"
1859     elif u"xxv710" in test_name:
1860         nic = u"xxv710"
1861     elif u"vic1227" in test_name:
1862         nic = u"vic1227"
1863     elif u"vic1385" in test_name:
1864         nic = u"vic1385"
1865     elif u"x553" in test_name:
1866         nic = u"x553"
1867     elif u"cx556" in test_name or u"cx556a" in test_name:
1868         nic = u"cx556a"
1869     else:
1870         nic = u""
1871
1872     if u"64b" in test_name:
1873         frame_size = u"64b"
1874     elif u"78b" in test_name:
1875         frame_size = u"78b"
1876     elif u"imix" in test_name:
1877         frame_size = u"imix"
1878     elif u"9000b" in test_name:
1879         frame_size = u"9000b"
1880     elif u"1518b" in test_name:
1881         frame_size = u"1518b"
1882     elif u"114b" in test_name:
1883         frame_size = u"114b"
1884     else:
1885         frame_size = u""
1886
1887     if u"1t1c" in test_name or \
1888         (u"-1c-" in test_name and
1889          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1890         cores = u"1t1c"
1891     elif u"2t2c" in test_name or \
1892          (u"-2c-" in test_name and
1893           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1894         cores = u"2t2c"
1895     elif u"4t4c" in test_name or \
1896          (u"-4c-" in test_name and
1897           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1898         cores = u"4t4c"
1899     elif u"2t1c" in test_name or \
1900          (u"-1c-" in test_name and
1901           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1902         cores = u"2t1c"
1903     elif u"4t2c" in test_name or \
1904          (u"-2c-" in test_name and
1905           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1906         cores = u"4t2c"
1907     elif u"8t4c" in test_name or \
1908          (u"-4c-" in test_name and
1909           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1910         cores = u"8t4c"
1911     else:
1912         cores = u""
1913
1914     if u"testpmd" in test_name:
1915         driver = u"testpmd"
1916     elif u"l3fwd" in test_name:
1917         driver = u"l3fwd"
1918     elif u"avf" in test_name:
1919         driver = u"avf"
1920     elif u"rdma" in test_name:
1921         driver = u"rdma"
1922     elif u"dnv" in testbed or u"tsh" in testbed:
1923         driver = u"ixgbe"
1924     else:
1925         driver = u"dpdk"
1926
1927     if u"acl" in test_name or \
1928             u"macip" in test_name or \
1929             u"nat" in test_name or \
1930             u"policer" in test_name or \
1931             u"cop" in test_name:
1932         bsf = u"features"
1933     elif u"scale" in test_name:
1934         bsf = u"scale"
1935     elif u"base" in test_name:
1936         bsf = u"base"
1937     else:
1938         bsf = u"base"
1939
1940     if u"114b" in test_name and u"vhost" in test_name:
1941         domain = u"vts"
1942     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1943         domain = u"dpdk"
1944     elif u"memif" in test_name:
1945         domain = u"container_memif"
1946     elif u"srv6" in test_name:
1947         domain = u"srv6"
1948     elif u"vhost" in test_name:
1949         domain = u"vhost"
1950         if u"vppl2xc" in test_name:
1951             driver += u"-vpp"
1952         else:
1953             driver += u"-testpmd"
1954         if u"lbvpplacp" in test_name:
1955             bsf += u"-link-bonding"
1956     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1957         domain = u"nf_service_density_vnfc"
1958     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1959         domain = u"nf_service_density_cnfc"
1960     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1961         domain = u"nf_service_density_cnfp"
1962     elif u"ipsec" in test_name:
1963         domain = u"ipsec"
1964         if u"sw" in test_name:
1965             bsf += u"-sw"
1966         elif u"hw" in test_name:
1967             bsf += u"-hw"
1968     elif u"ethip4vxlan" in test_name:
1969         domain = u"ip4_tunnels"
1970     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1971         domain = u"ip4"
1972     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1973         domain = u"ip6"
1974     elif u"l2xcbase" in test_name or \
1975             u"l2xcscale" in test_name or \
1976             u"l2bdbasemaclrn" in test_name or \
1977             u"l2bdscale" in test_name or \
1978             u"l2patch" in test_name:
1979         domain = u"l2"
1980     else:
1981         domain = u""
1982
1983     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1984     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1985
1986     return file_name + anchor_name
1987
1988
1989 def table_perf_trending_dash_html(table, input_data):
1990     """Generate the table(s) with algorithm:
1991     table_perf_trending_dash_html specified in the specification
1992     file.
1993
1994     :param table: Table to generate.
1995     :param input_data: Data to process.
1996     :type table: dict
1997     :type input_data: InputData
1998     """
1999
2000     _ = input_data
2001
2002     if not table.get(u"testbed", None):
2003         logging.error(
2004             f"The testbed is not defined for the table "
2005             f"{table.get(u'title', u'')}."
2006         )
2007         return
2008
2009     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2010
2011     try:
2012         with open(table[u"input-file"], u'rt') as csv_file:
2013             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2014     except KeyError:
2015         logging.warning(u"The input file is not defined.")
2016         return
2017     except csv.Error as err:
2018         logging.warning(
2019             f"Not possible to process the file {table[u'input-file']}.\n"
2020             f"{repr(err)}"
2021         )
2022         return
2023
2024     # Table:
2025     dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2026
2027     # Table header:
2028     trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2029     for idx, item in enumerate(csv_lst[0]):
2030         alignment = u"left" if idx == 0 else u"center"
2031         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
2032         thead.text = item
2033
2034     # Rows:
2035     colors = {
2036         u"regression": (
2037             u"#ffcccc",
2038             u"#ff9999"
2039         ),
2040         u"progression": (
2041             u"#c6ecc6",
2042             u"#9fdf9f"
2043         ),
2044         u"normal": (
2045             u"#e9f1fb",
2046             u"#d4e4f7"
2047         )
2048     }
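         # A row is coloured with the red shades if it contains at least one
         # regression, with the green shades if it contains a progression (and
         # no regression), otherwise with the default shades; the two shades
         # of each colour alternate with the row parity: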
2049     for r_idx, row in enumerate(csv_lst[1:]):
2050         if int(row[4]):
2051             color = u"regression"
2052         elif int(row[5]):
2053             color = u"progression"
2054         else:
2055             color = u"normal"
2056         trow = ET.SubElement(
2057             dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
2058         )
2059
2060         # Columns:
2061         for c_idx, item in enumerate(row):
2062             tdata = ET.SubElement(
2063                 trow,
2064                 u"td",
2065                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
2066             )
2067             # Name:
2068             if c_idx == 0:
2069                 ref = ET.SubElement(
2070                     tdata,
2071                     u"a",
2072                     attrib=dict(
2073                         href=f"../trending/"
2074                              f"{_generate_url(table.get(u'testbed', ''), item)}"
2075                     )
2076                 )
2077                 ref.text = item
2078             else:
2079                 tdata.text = item
2080     try:
2081         with open(table[u"output-file"], u'w') as html_file:
2082             logging.info(f"    Writing file: {table[u'output-file']}")
2083             html_file.write(u".. raw:: html\n\n\t")
2084             html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
2085             html_file.write(u"\n\t<p><br><br></p>\n")
2086     except KeyError:
2087         logging.warning(u"The output file is not defined.")
2088         return
2089
2090
2091 def table_last_failed_tests(table, input_data):
2092     """Generate the table(s) with algorithm: table_last_failed_tests
2093     specified in the specification file.
2094
2095     :param table: Table to generate.
2096     :param input_data: Data to process.
2097     :type table: pandas.Series
2098     :type input_data: InputData
2099     """
2100
2101     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2102
2103     # Transform the data
2104     logging.info(
2105         f"    Creating the data set for the {table.get(u'type', u'')} "
2106         f"{table.get(u'title', u'')}."
2107     )
2108
2109     data = input_data.filter_data(table, continue_on_error=True)
2110
2111     if data is None or data.empty:
2112         logging.warning(
2113             f"    No data for the {table.get(u'type', u'')} "
2114             f"{table.get(u'title', u'')}."
2115         )
2116         return
2117
2118     tbl_list = list()
2119     for job, builds in table[u"data"].items():
2120         for build in builds:
2121             build = str(build)
2122             try:
2123                 version = input_data.metadata(job, build).get(u"version", u"")
2124             except KeyError:
2125                 logging.error(f"Data for {job}: {build} is not present.")
2126                 return
2127             tbl_list.append(build)
2128             tbl_list.append(version)
2129             failed_tests = list()
2130             passed = 0
2131             failed = 0
2132             for tst_data in data[job][build].values:
2133                 if tst_data[u"status"] != u"FAIL":
2134                     passed += 1
2135                     continue
2136                 failed += 1
2137                 groups = re.search(REGEX_NIC, tst_data[u"parent"])
2138                 if not groups:
2139                     continue
2140                 nic = groups.group(0)
2141                 failed_tests.append(f"{nic}-{tst_data[u'name']}")
2142             tbl_list.append(str(passed))
2143             tbl_list.append(str(failed))
2144             tbl_list.extend(failed_tests)
2145
2146     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2147     logging.info(f"    Writing file: {file_name}")
2148     with open(file_name, u"wt") as file_handler:
2149         for test in tbl_list:
2150             file_handler.write(test + u'\n')
2151
2152
2153 def table_failed_tests(table, input_data):
2154     """Generate the table(s) with algorithm: table_failed_tests
2155     specified in the specification file.
2156
2157     :param table: Table to generate.
2158     :param input_data: Data to process.
2159     :type table: pandas.Series
2160     :type input_data: InputData
2161     """
2162
2163     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2164
2165     # Transform the data
2166     logging.info(
2167         f"    Creating the data set for the {table.get(u'type', u'')} "
2168         f"{table.get(u'title', u'')}."
2169     )
2170     data = input_data.filter_data(table, continue_on_error=True)
2171
2172     # Prepare the header of the tables
2173     header = [
2174         u"Test Case",
2175         u"Failures [#]",
2176         u"Last Failure [Time]",
2177         u"Last Failure [VPP-Build-Id]",
2178         u"Last Failure [CSIT-Job-Build-Id]"
2179     ]
2180
2181     # Generate the data for the table according to the model in the table
2182     # specification
2183
2184     now = dt.utcnow()
2185     timeperiod = timedelta(int(table.get(u"window", 7)))
2186
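         # Collect the results only from builds generated within the last
         # u"window" days (7 by default); the failures are counted from these
         # builds below: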
2187     tbl_dict = dict()
2188     for job, builds in table[u"data"].items():
2189         for build in builds:
2190             build = str(build)
2191             for tst_name, tst_data in data[job][build].items():
2192                 if tst_name.lower() in table.get(u"ignore-list", list()):
2193                     continue
2194                 if tbl_dict.get(tst_name, None) is None:
2195                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
2196                     if not groups:
2197                         continue
2198                     nic = groups.group(0)
2199                     tbl_dict[tst_name] = {
2200                         u"name": f"{nic}-{tst_data[u'name']}",
2201                         u"data": OrderedDict()
2202                     }
2203                 try:
2204                     generated = input_data.metadata(job, build).\
2205                         get(u"generated", u"")
2206                     if not generated:
2207                         continue
2208                     then = dt.strptime(generated, u"%Y%m%d %H:%M")
2209                     if (now - then) <= timeperiod:
2210                         tbl_dict[tst_name][u"data"][build] = (
2211                             tst_data[u"status"],
2212                             generated,
2213                             input_data.metadata(job, build).get(u"version",
2214                                                                 u""),
2215                             build
2216                         )
2217                 except (TypeError, KeyError) as err:
2218                     logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
2219
2220     max_fails = 0
2221     tbl_lst = list()
2222     for tst_data in tbl_dict.values():
2223         fails_nr = 0
2224         fails_last_date = u""
2225         fails_last_vpp = u""
2226         fails_last_csit = u""
2227         for val in tst_data[u"data"].values():
2228             if val[0] == u"FAIL":
2229                 fails_nr += 1
2230                 fails_last_date = val[1]
2231                 fails_last_vpp = val[2]
2232                 fails_last_csit = val[3]
2233         if fails_nr:
2234             max_fails = fails_nr if fails_nr > max_fails else max_fails
2235             tbl_lst.append(
2236                 [
2237                     tst_data[u"name"],
2238                     fails_nr,
2239                     fails_last_date,
2240                     fails_last_vpp,
2241                     f"mrr-daily-build-{fails_last_csit}"
2242                 ]
2243             )
2244
2245     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
2246     tbl_sorted = list()
2247     for nrf in range(max_fails, -1, -1):
2248         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
2249         tbl_sorted.extend(tbl_fails)
2250
2251     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2252     logging.info(f"    Writing file: {file_name}")
2253     with open(file_name, u"wt") as file_handler:
2254         file_handler.write(u",".join(header) + u"\n")
2255         for test in tbl_sorted:
2256             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
2257
2258     logging.info(f"    Writing file: {table[u'output-file']}.txt")
2259     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
2260
2261
2262 def table_failed_tests_html(table, input_data):
2263     """Generate the table(s) with algorithm: table_failed_tests_html
2264     specified in the specification file.
2265
2266     :param table: Table to generate.
2267     :param input_data: Data to process.
2268     :type table: pandas.Series
2269     :type input_data: InputData
2270     """
2271
2272     _ = input_data
2273
2274     if not table.get(u"testbed", None):
2275         logging.error(
2276             f"The testbed is not defined for the table "
2277             f"{table.get(u'title', u'')}."
2278         )
2279         return
2280
2281     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2282
2283     try:
2284         with open(table[u"input-file"], u'rt') as csv_file:
2285             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2286     except KeyError:
2287         logging.warning(u"The input file is not defined.")
2288         return
2289     except csv.Error as err:
2290         logging.warning(
2291             f"Not possible to process the file {table[u'input-file']}.\n"
2292             f"{repr(err)}"
2293         )
2294         return
2295
2296     # Table:
2297     failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2298
2299     # Table header:
2300     trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2301     for idx, item in enumerate(csv_lst[0]):
2302         alignment = u"left" if idx == 0 else u"center"
2303         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
2304         thead.text = item
2305
2306     # Rows:
2307     colors = (u"#e9f1fb", u"#d4e4f7")
2308     for r_idx, row in enumerate(csv_lst[1:]):
2309         background = colors[r_idx % 2]
2310         trow = ET.SubElement(
2311             failed_tests, u"tr", attrib=dict(bgcolor=background)
2312         )
2313
2314         # Columns:
2315         for c_idx, item in enumerate(row):
2316             tdata = ET.SubElement(
2317                 trow,
2318                 u"td",
2319                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
2320             )
2321             # Name:
2322             if c_idx == 0:
2323                 ref = ET.SubElement(
2324                     tdata,
2325                     u"a",
2326                     attrib=dict(
2327                         href=f"../trending/"
2328                              f"{_generate_url(table.get(u'testbed', ''), item)}"
2329                     )
2330                 )
2331                 ref.text = item
2332             else:
2333                 tdata.text = item
2334     try:
2335         with open(table[u"output-file"], u'w') as html_file:
2336             logging.info(f"    Writing file: {table[u'output-file']}")
2337             html_file.write(u".. raw:: html\n\n\t")
2338             html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2339             html_file.write(u"\n\t<p><br><br></p>\n")
2340     except KeyError:
2341         logging.warning(u"The output file is not defined.")
2342         return
2343
2344
2345 def table_comparison(table, input_data):
2346     """Generate the table(s) with algorithm: table_comparison
2347     specified in the specification file.
2348
2349     :param table: Table to generate.
2350     :param input_data: Data to process.
2351     :type table: pandas.Series
2352     :type input_data: InputData
2353     """
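         # A minimal, purely illustrative sketch of the specification entry
         # for the new comparison tables; only the keys read below are real,
         # the values are hypothetical placeholders:
         #
         #   algorithm: "table_comparison"
         #   include-tests: "NDR"                  # or "PDR" / "MRR"
         #   columns:                              # one column per data set
         #     - title: "2001"
         #       data: ...                         # jobs and builds to process
         #       data-replacement: ...             # optional, overrides "data"
         #     - title: "2005"
         #       data: ...
         #   comparisons:                          # 1-based column indices
         #     - reference: 1
         #       compare: 2
         #       reference-alt: ...                # optional fallback column
         #   rca:                                  # optional root cause files
         #     - title: "RCA1"
         #       data: ...                         # path to a YAML file
         #   output-file: ...                      # path without extension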
2354     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2355
2356     # Transform the data
2357     logging.info(
2358         f"    Creating the data set for the {table.get(u'type', u'')} "
2359         f"{table.get(u'title', u'')}."
2360     )
2361
2362     columns = table.get(u"columns", None)
2363     if not columns:
2364         logging.error(
2365             f"No columns specified for {table.get(u'title', u'')}. Skipping."
2366         )
2367         return
2368
2369     cols = list()
2370     for idx, col in enumerate(columns):
2371         if col.get(u"data", None) is None:
2372             logging.warning(f"No data for column {col.get(u'title', u'')}")
2373             continue
2374         data = input_data.filter_data(
2375             table,
2376             params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2377             data=col[u"data"],
2378             continue_on_error=True
2379         )
2380         col_data = {
2381             u"title": col.get(u"title", f"Column{idx}"),
2382             u"data": dict()
2383         }
2384         for builds in data.values:
2385             for build in builds:
2386                 for tst_name, tst_data in build.items():
2387                     tst_name_mod = \
2388                         _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2389                     if col_data[u"data"].get(tst_name_mod, None) is None:
2390                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
2391                         if u"across testbeds" in table[u"title"].lower() or \
2392                                 u"across topologies" in table[u"title"].lower():
2393                             name = _tpc_modify_displayed_test_name(name)
2394                         col_data[u"data"][tst_name_mod] = {
2395                             u"name": name,
2396                             u"replace": True,
2397                             u"data": list(),
2398                             u"mean": None,
2399                             u"stdev": None
2400                         }
2401                     _tpc_insert_data(
2402                         target=col_data[u"data"][tst_name_mod][u"data"],
2403                         src=tst_data,
2404                         include_tests=table[u"include-tests"]
2405                     )
2406
2407         replacement = col.get(u"data-replacement", None)
2408         if replacement:
2409             rpl_data = input_data.filter_data(
2410                 table,
2411                 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2412                 data=replacement,
2413                 continue_on_error=True
2414             )
2415             for builds in rpl_data.values:
2416                 for build in builds:
2417                     for tst_name, tst_data in build.items():
2418                         tst_name_mod = \
2419                             _tpc_modify_test_name(tst_name).\
2420                             replace(u"2n1l-", u"")
2421                         if col_data[u"data"].get(tst_name_mod, None) is None:
2422                             name = tst_data[u'name'].rsplit(u'-', 1)[0]
2423                             if u"across testbeds" in table[u"title"].lower() \
2424                                     or u"across topologies" in \
2425                                     table[u"title"].lower():
2426                                 name = _tpc_modify_displayed_test_name(name)
2427                             col_data[u"data"][tst_name_mod] = {
2428                                 u"name": name,
2429                                 u"replace": False,
2430                                 u"data": list(),
2431                                 u"mean": None,
2432                                 u"stdev": None
2433                             }
2434                         if col_data[u"data"][tst_name_mod][u"replace"]:
2435                             col_data[u"data"][tst_name_mod][u"replace"] = False
2436                             col_data[u"data"][tst_name_mod][u"data"] = list()
2437                         _tpc_insert_data(
2438                             target=col_data[u"data"][tst_name_mod][u"data"],
2439                             src=tst_data,
2440                             include_tests=table[u"include-tests"]
2441                         )
2442
2443         if table[u"include-tests"] in (u"NDR", u"PDR"):
2444             for tst_name, tst_data in col_data[u"data"].items():
2445                 if tst_data[u"data"]:
2446                     tst_data[u"mean"] = mean(tst_data[u"data"])
2447                     tst_data[u"stdev"] = stdev(tst_data[u"data"])
2448         elif table[u"include-tests"] in (u"MRR", ):
2449             for tst_name, tst_data in col_data[u"data"].items():
2450                 if tst_data[u"data"]:
2451                     tst_data[u"mean"] = tst_data[u"data"][0][0]
2452                     tst_data[u"stdev"] = tst_data[u"data"][0][1]
2453
2454         cols.append(col_data)
2455
2456     tbl_dict = dict()
2457     for col in cols:
2458         for tst_name, tst_data in col[u"data"].items():
2459             if tbl_dict.get(tst_name, None) is None:
2460                 tbl_dict[tst_name] = {
2461                     "name": tst_data[u"name"]
2462                 }
2463             tbl_dict[tst_name][col[u"title"]] = {
2464                 u"mean": tst_data[u"mean"],
2465                 u"stdev": tst_data[u"stdev"]
2466             }
2467
2468     tbl_lst = list()
2469     for tst_data in tbl_dict.values():
2470         row = [tst_data[u"name"], ]
2471         for col in cols:
2472             row.append(tst_data.get(col[u"title"], None))
2473         tbl_lst.append(row)
2474
2475     comparisons = table.get(u"comparisons", None)
2476     if comparisons and isinstance(comparisons, list):
2477         for comp in list(comparisons):
2478             try:
2479                 col_ref = int(comp[u"reference"])
2480                 col_cmp = int(comp[u"compare"])
2481             except KeyError:
2482                 logging.warning(u"Comparison: No references defined! Skipping.")
2483                 comparisons.remove(comp)
2484                 continue
2485             if not (0 < col_ref <= len(cols) and
2486                     0 < col_cmp <= len(cols)) or \
2487                     col_ref == col_cmp:
2488                 logging.warning(f"Wrong values of reference={col_ref} "
2489                                 f"and/or compare={col_cmp}. Skipping.")
2490                 comparisons.remove(comp)
2491                 continue
2492
2493     tbl_cmp_lst = list()
2494     if comparisons:
2495         for row in tbl_lst:
2496             new_row = deepcopy(row)
2497             add_to_tbl = False
2498             for comp in comparisons:
2499                 ref_itm = row[int(comp[u"reference"])]
2500                 if ref_itm is None and \
2501                         comp.get(u"reference-alt", None) is not None:
2502                     ref_itm = row[int(comp[u"reference-alt"])]
2503                 cmp_itm = row[int(comp[u"compare"])]
2504                 if ref_itm is not None and cmp_itm is not None and \
2505                         ref_itm[u"mean"] is not None and \
2506                         cmp_itm[u"mean"] is not None and \
2507                         ref_itm[u"stdev"] is not None and \
2508                         cmp_itm[u"stdev"] is not None:
2509                     delta, d_stdev = relative_change_stdev(
2510                         ref_itm[u"mean"], cmp_itm[u"mean"],
2511                         ref_itm[u"stdev"], cmp_itm[u"stdev"]
2512                     )
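                         # The delta and its stdev are stored multiplied by
                         # 1e6 so that the common "/ 1e6" formatting used
                         # below prints the comparison columns as percentages
                         # while the data columns stay in Mpps: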
2513                     new_row.append(
2514                         {
2515                             u"mean": delta * 1e6,
2516                             u"stdev": d_stdev * 1e6
2517                         }
2518                     )
2519                     add_to_tbl = True
2520                 else:
2521                     new_row.append(None)
2522             if add_to_tbl:
2523                 tbl_cmp_lst.append(new_row)
2524
2525     tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
2526     tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
2527
2528     rcas = list()
2529     rca_in = table.get(u"rca", None)
2530     if rca_in and isinstance(rca_in, list):
2531         for idx, itm in enumerate(rca_in):
2532             try:
2533                 with open(itm.get(u"data", u""), u"r") as rca_file:
2534                     rcas.append(
2535                         {
2536                             u"title": itm.get(u"title", f"RCA{idx}"),
2537                             u"data": load(rca_file, Loader=FullLoader)
2538                         }
2539                     )
2540             except (YAMLError, IOError) as err:
2541                 logging.warning(
2542                     f"The RCA file {itm.get(u'data', u'')} does not exist "
2543                     f"or is corrupted!"
2544                 )
2545                 logging.debug(repr(err))
2546
2547     tbl_for_csv = list()
2548     for line in tbl_cmp_lst:
2549
2550         row = [line[0], ]
2551
2552         for idx, rca in enumerate(rcas):
2553             rca_nr = rca[u"data"].get(row[0 + idx], u"-")
2554             row.insert(idx, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
2555
2556         for idx, itm in enumerate(line[1:]):
2557             if itm is None:
2558                 row.append(u"NT")
2559                 row.append(u"NT")
2560             else:
2561                 row.append(round(float(itm[u'mean']) / 1e6, 3))
2562                 row.append(round(float(itm[u'stdev']) / 1e6, 3))
2563         tbl_for_csv.append(row)
2564
2565     header_csv = [rca[u"title"] for rca in rcas]
2566     header_csv.append(u"Test Case")
2567     for col in cols:
2568         header_csv.append(f"Avg({col[u'title']})")
2569         header_csv.append(f"Stdev({col[u'title']})")
2570     for comp in comparisons:
2571         header_csv.append(
2572             f"Avg({cols[comp[u'reference'] - 1][u'title']},"
2573             f"{cols[comp[u'compare'] - 1][u'title']})"
2574         )
2575         header_csv.append(
2576             f"Stdev({cols[comp[u'reference'] - 1][u'title']},"
2577             f"{cols[comp[u'compare'] - 1][u'title']})"
2578         )
2579
2580     csv_file = f"{table[u'output-file']}-csv.csv"
2581     with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2582         file_handler.write(u";".join(header_csv) + u"\n")
2583         for test in tbl_for_csv:
2584             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2585
2586     tbl_final = list()
2587     for line in tbl_cmp_lst:
2588         row = [line[0], ]
2589         for idx, rca in enumerate(rcas):
2590             rca_nr = rca[u"data"].get(row[0 + idx], u"-")
2591             row.insert(idx, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
2592         for idx, itm in enumerate(line[1:]):
2593             if itm is None:
2594                 row.append(u"NT")
2595             else:
2596                 if idx < len(cols):
2597                     row.append(
2598                         f"{round(float(itm[u'mean']) / 1e6, 1)} "
2599                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
2600                         replace(u"nan", u"NaN")
2601                     )
2602                 else:
2603                     row.append(
2604                         f"{round(float(itm[u'mean']) / 1e6, 1):+} "
2605                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
2606                         replace(u"nan", u"NaN")
2607                     )
2608         tbl_final.append(row)
2609
2610     header = [rca[u"title"] for rca in rcas]
2611     header.append(u"Test Case")
2612     header.extend([col[u"title"] for col in cols])
2613     header.extend(
2614         [f"Diff({cols[comp[u'reference'] - 1][u'title']},"
2615          f"{cols[comp[u'compare'] - 1][u'title']})"
2616          for comp in comparisons]
2617     )
2618
2619     # Generate csv tables:
2620     csv_file = f"{table[u'output-file']}.csv"
2621     with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2622         file_handler.write(u";".join(header) + u"\n")
2623         for test in tbl_final:
2624             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2625
2626     # Generate txt table:
2627     txt_file_name = f"{table[u'output-file']}.txt"
2628     convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
2629
2630     # Generate rst table:
2631     file_name = table[u'output-file'].split(u"/")[-1]
2632     if u"vpp" in table[u'output-file']:
2633         path = u"_tmp/src/vpp_performance_tests/comparisons/"
2634     else:
2635         path = u"_tmp/src/dpdk_performance_tests/comparisons/"
2636     rst_file_name = f"{path}{file_name}-txt.rst"
2637     csv_file_name = f"{path}{file_name}.csv"
2638     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2639         file_handler.write(
2640             u",".join(
2641                 [f'"{itm}"' for itm in header]
2642             ) + u"\n"
2643         )
2644         for test in tbl_final:
2645             file_handler.write(
2646                 u",".join(
2647                     [f'"{itm}"' for itm in test]
2648                 ) + u"\n"
2649             )
2650
2651     convert_csv_to_pretty_txt(csv_file_name, rst_file_name, delimiter=u",")
2652
2653     legend = u"\nLegend:\n"
2654     for idx, rca in enumerate(rcas):
2655         try:
2656             desc = (
2657                 f"Diff({cols[comparisons[idx][u'reference'] - 1][u'title']},"
2658                 f"{cols[comparisons[idx][u'compare'] - 1][u'title']})\n"
2659             )
2660         except (KeyError, IndexError):
2661             desc = u"\n"
2662         legend += f"{rca[u'title']}: Root Cause Analysis for {desc}"
2663     legend += (
2664         u"First part of the result is a mean value [Mpps].\n"
2665         f"Second part of the result following '\u00B1' is a standard "
2666         u"deviation [Mpps].\n"
2667         u"First part of Diff is a relative change of mean values [%].\n"
2668         f"Second part of Diff following '\u00B1' is a standard deviation "
2669         u"of the Diff [percentage points].\n"
2670         u"NT: Not tested.\n"
2671     )
2672
2673     footnote = u""
2674     for rca in rcas:
2675         footnote += f"\n{rca[u'title']}:\n"
2676         footnote += rca[u"data"].get(u"footnote", u"")
2677
2678     with open(txt_file_name, u'a', encoding='utf-8') as txt_file:
2679         txt_file.write(legend)
2680         if footnote:
2681             txt_file.write(footnote)
2682         txt_file.write(u":END")
2683
2684     with open(rst_file_name, u'a', encoding='utf-8') as txt_file:
2685         txt_file.write(legend.replace(u"\n", u" |br| "))
2686         if footnote:
2687             txt_file.write(footnote.replace(u"\n", u" |br| "))
2688         txt_file.write(u":END")
2689
2690     # Generate html table:
2691     _tpc_generate_html_table(
2692         header,
2693         tbl_final,
2694         table[u'output-file'],
2695         legend=legend,
2696         footnote=footnote,
2697         sort_data=False
2698     )