# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Algorithms to generate tables.
"""


import logging
import csv
import re

from collections import OrderedDict
from xml.etree import ElementTree as ET
from datetime import datetime as dt
from datetime import timedelta
from copy import deepcopy

import plotly.graph_objects as go
import plotly.offline as ploff
import pandas as pd

from numpy import nan, isnan
from yaml import load, FullLoader, YAMLError

from pal_utils import mean, stdev, classify_anomalies, \
    convert_csv_to_pretty_txt, relative_change_stdev, relative_change


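# Regexp matching the NIC portion of a test/suite name, e.g. a string such as
# u"10ge2p1x710" (example string is illustrative, not taken from this file).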
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')


def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            if table[u"algorithm"] == u"table_weekly_comparison":
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            generator[table[u"algorithm"]](table, data)
        except NameError as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")


def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: table_oper_data_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

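    # The nested helper below builds one HTML table per test; the tables of
    # all tests belonging to a suite are later concatenated into one rst file.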
    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")


def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u" |br| ", 1)[1]
                            except IndexError:
                                pass
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")


def _tpc_modify_test_name(test_name, ignore_nic=False):
    """Modify a test name by replacing its parts.

    :param test_name: Test name to be modified.
    :param ignore_nic: If True, NIC is removed from TC name.
    :type test_name: str
    :type ignore_nic: bool
    :returns: Modified test name.
    :rtype: str
    """
    test_name_mod = test_name.\
        replace(u"-ndrpdrdisc", u""). \
        replace(u"-ndrpdr", u"").\
        replace(u"-pdrdisc", u""). \
        replace(u"-ndrdisc", u"").\
        replace(u"-pdr", u""). \
        replace(u"-ndr", u""). \
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c"). \
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c"). \
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")

    if ignore_nic:
        return re.sub(REGEX_NIC, u"", test_name_mod)
    return test_name_mod


def _tpc_modify_displayed_test_name(test_name):
    """Modify a test name which is displayed in a table by replacing its parts.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    return test_name.\
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c"). \
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c"). \
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")


def _tpc_insert_data(target, src, include_tests):
    """Insert src data into the target structure.

    :param target: Target structure where the data is placed.
    :param src: Source data to be placed into the target structure.
    :param include_tests: Which results will be included (MRR, NDR, PDR).
    :type target: list
    :type src: dict
    :type include_tests: str
    """
    try:
        if include_tests == u"MRR":
            target.append(
                (
                    src[u"result"][u"receive-rate"],
                    src[u"result"][u"receive-stdev"]
                )
            )
        elif include_tests == u"PDR":
            target.append(src[u"throughput"][u"PDR"][u"LOWER"])
        elif include_tests == u"NDR":
            target.append(src[u"throughput"][u"NDR"][u"LOWER"])
    except (KeyError, TypeError):
        pass


def _tpc_sort_table(table):
    """Sort the table this way:

    1. Put "New in CSIT-XXXX" at the first place.
    2. Put "See footnote" at the second place.
    3. Sort the rest by "Delta".

    :param table: Table to sort.
    :type table: list
    :returns: Sorted table.
    :rtype: list
    """

    tbl_new = list()
    tbl_see = list()
    tbl_delta = list()
    for item in table:
        if isinstance(item[-1], str):
            if u"New in CSIT" in item[-1]:
                tbl_new.append(item)
            elif u"See footnote" in item[-1]:
                tbl_see.append(item)
        else:
            tbl_delta.append(item)

    # Sort the tables:
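    # The two-pass sorts below rely on Python's stable sort: the first pass
    # (by the first column) only breaks ties left by the decisive second pass
    # (by column -2, i.e. the delta, respectively the footnote column).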
    tbl_new.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
    tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
    tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)

    # Put the tables together:
    table = list()
    # We do not want "New in CSIT":
    # table.extend(tbl_new)
    table.extend(tbl_see)
    table.extend(tbl_delta)

    return table


def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate an HTML table from input data with a simple sorting option.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
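    # idx, the position of the u"Test Case" column, selects which alignment
    # and column-width preset from params is applied to the table.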
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
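        # One go.Table trace is added per pre-sorted DataFrame; the dropdown
        # menu defined below switches which of these traces is visible.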
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
        menu_items.extend(menu_items_rev)
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )
        if legend:
            rst_file.write(legend[1:].replace(u"\n", u" |br| "))
        if footnote:
            rst_file.write(footnote.replace(u"\n", u" |br| ")[1:])


def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test Case", ]
        legend = u"\nLegend:\n"

        rca_data = None
        rca = table.get(u"rca", None)
        if rca:
            try:
                with open(rca.get(u"data-file", u""), u"r") as rca_file:
                    rca_data = load(rca_file, Loader=FullLoader)
                header.insert(0, rca.get(u"title", u"RCA"))
                legend += (
                    u"RCA: Reference to the Root Cause Analysis, see below.\n"
                )
            except (YAMLError, IOError) as err:
                logging.warning(repr(err))

        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} Avg({table[u'include-tests']})",
                    f"{item[u'title']} Stdev({table[u'include-tests']})"
                ]
            )
            legend += (
                f"{item[u'title']} Avg({table[u'include-tests']}): "
                f"Mean value of {table[u'include-tests']} [Mpps] computed from "
                f"a series of runs of the listed tests executed against "
                f"{item[u'title']}.\n"
                f"{item[u'title']} Stdev({table[u'include-tests']}): "
                f"Standard deviation value of {table[u'include-tests']} [Mpps] "
                f"computed from a series of runs of the listed tests executed "
                f"against {item[u'title']}.\n"
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} "
                f"Avg({table[u'include-tests']})",
                f"{table[u'reference'][u'title']} "
                f"Stdev({table[u'include-tests']})",
                f"{table[u'compare'][u'title']} "
                f"Avg({table[u'include-tests']})",
                f"{table[u'compare'][u'title']} "
                f"Stdev({table[u'include-tests']})",
                f"Diff({table[u'reference'][u'title']},"
                f"{table[u'compare'][u'title']})",
                u"Stdev(Diff)"
            ]
        )
        header_str = u";".join(header) + u"\n"
        legend += (
            f"{table[u'reference'][u'title']} "
            f"Avg({table[u'include-tests']}): "
            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
            f"series of runs of the listed tests executed against "
            f"{table[u'reference'][u'title']}.\n"
            f"{table[u'reference'][u'title']} "
            f"Stdev({table[u'include-tests']}): "
            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
            f"computed from a series of runs of the listed tests executed "
            f"against {table[u'reference'][u'title']}.\n"
            f"{table[u'compare'][u'title']} "
            f"Avg({table[u'include-tests']}): "
            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
            f"series of runs of the listed tests executed against "
            f"{table[u'compare'][u'title']}.\n"
            f"{table[u'compare'][u'title']} "
            f"Stdev({table[u'include-tests']}): "
            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
            f"computed from a series of runs of the listed tests executed "
            f"against {table[u'compare'][u'title']}.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Stdev(Diff): "
            u"Standard deviation of percentage change calculated for mean "
            u"values.\n"
            u"NT: Not Tested\n"
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data for the table:
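    # tbl_dict maps a normalised test name to its displayed name, the raw
    # reference and compare samples, and the flags driving data replacement.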
    tbl_dict = dict()
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = tst_data[u'name'].rsplit(u'-', 1)[0]
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"replace-ref": True,
                        u"replace-cmp": True,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 src=tst_data,
                                 include_tests=table[u"include-tests"])

    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
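        # Results found in the replacement builds override the reference data
        # collected above; the u"replace-ref" flag ensures the original
        # samples are dropped only once, on the first replacement hit.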
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"replace-ref": False,
                            u"replace-cmp": True,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if tbl_dict[tst_name_mod][u"replace-ref"]:
                        tbl_dict[tst_name_mod][u"replace-ref"] = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = tst_data[u'name'].rsplit(u'-', 1)[0]
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"replace-ref": False,
                        u"replace-cmp": True,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"replace-ref": False,
                            u"replace-cmp": False,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if tbl_dict[tst_name_mod][u"replace-cmp"]:
                        tbl_dict[tst_name_mod][u"replace-cmp"] = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = (tst_data[u"result"][u"receive-rate"],
                                   tst_data[u"result"][u"receive-stdev"])
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        pass

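    # Build one row per test: displayed name, optional history columns,
    # reference and compare statistics, the relative delta with its stdev,
    # and (when available) the RCA reference inserted at the front.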
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        if table[u"include-tests"] == u"MRR":
                            item.append(round(hist_data[0][0] / 1e6, 1))
                            item.append(round(hist_data[0][1] / 1e6, 1))
                        else:
                            item.append(round(mean(hist_data) / 1e6, 1))
                            item.append(round(stdev(hist_data) / 1e6, 1))
                    else:
                        item.extend([u"NT", u"NT"])
            else:
                item.extend([u"NT", u"NT"])
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([u"NT", u"NT"])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([u"NT", u"NT"])
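        # At this point item[-4:] holds ref mean, ref stdev, cmp mean and
        # cmp stdev; the delta is appended only when both means are present.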
        if item[-2] == u"NT":
            pass
        elif item[-4] == u"NT":
            item.append(u"New in CSIT-2001")
            item.append(u"New in CSIT-2001")
        elif data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            )
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
        if rca_data:
            rca_nr = rca_data.get(item[0], u"-")
            item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        if (len(item) == len(header)) and (item[-4] != u"NT"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")

    footnote = u""
    with open(txt_file_name, u'a') as txt_file:
        txt_file.write(legend)
        if rca_data:
            footnote = rca_data.get(u"footnote", u"")
            if footnote:
                txt_file.write(f"\n{footnote}")
        txt_file.write(u"\n:END")

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_lst,
        table[u'output-file'],
        legend=legend,
        footnote=footnote,
        title=table.get(u"title", u"")
    )


def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison_nic
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test Case", ]
        legend = u"\nLegend:\n"

        rca_data = None
        rca = table.get(u"rca", None)
        if rca:
            try:
                with open(rca.get(u"data-file", ""), u"r") as rca_file:
                    rca_data = load(rca_file, Loader=FullLoader)
                header.insert(0, rca.get(u"title", "RCA"))
                legend += (
                    u"RCA: Reference to the Root Cause Analysis, see below.\n"
                )
            except (YAMLError, IOError) as err:
                logging.warning(repr(err))

        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} Avg({table[u'include-tests']})",
                    f"{item[u'title']} Stdev({table[u'include-tests']})"
                ]
            )
            legend += (
                f"{item[u'title']} Avg({table[u'include-tests']}): "
                f"Mean value of {table[u'include-tests']} [Mpps] computed from "
                f"a series of runs of the listed tests executed against "
                f"{item[u'title']}.\n"
                f"{item[u'title']} Stdev({table[u'include-tests']}): "
                f"Standard deviation value of {table[u'include-tests']} [Mpps] "
                f"computed from a series of runs of the listed tests executed "
                f"against {item[u'title']}.\n"
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} "
                f"Avg({table[u'include-tests']})",
                f"{table[u'reference'][u'title']} "
                f"Stdev({table[u'include-tests']})",
                f"{table[u'compare'][u'title']} "
                f"Avg({table[u'include-tests']})",
                f"{table[u'compare'][u'title']} "
                f"Stdev({table[u'include-tests']})",
                f"Diff({table[u'reference'][u'title']},"
                f"{table[u'compare'][u'title']})",
                u"Stdev(Diff)"
            ]
        )
        header_str = u";".join(header) + u"\n"
        legend += (
            f"{table[u'reference'][u'title']} "
            f"Avg({table[u'include-tests']}): "
            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
            f"series of runs of the listed tests executed against "
            f"{table[u'reference'][u'title']}.\n"
            f"{table[u'reference'][u'title']} "
            f"Stdev({table[u'include-tests']}): "
            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
            f"computed from a series of runs of the listed tests executed "
            f"against {table[u'reference'][u'title']}.\n"
            f"{table[u'compare'][u'title']} "
            f"Avg({table[u'include-tests']}): "
            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
            f"series of runs of the listed tests executed against "
            f"{table[u'compare'][u'title']}.\n"
            f"{table[u'compare'][u'title']} "
            f"Stdev({table[u'include-tests']}): "
            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
            f"computed from a series of runs of the listed tests executed "
            f"against {table[u'compare'][u'title']}.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Stdev(Diff): "
            u"Standard deviation of percentage change calculated for mean "
            u"values.\n"
            u"NT: Not Tested\n"
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data for the table:
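    # Same flow as in table_perf_comparison, except that tests are filtered
    # by the configured NIC tag and the NIC part is stripped from the
    # normalised test names (ignore_nic=True).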
1131     tbl_dict = dict()
1132     for job, builds in table[u"reference"][u"data"].items():
1133         for build in builds:
1134             for tst_name, tst_data in data[job][str(build)].items():
1135                 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1136                     continue
1137                 tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
1138                 if (u"across topologies" in table[u"title"].lower() or
1139                         (u" 3n-" in table[u"title"].lower() and
1140                          u" 2n-" in table[u"title"].lower())):
1141                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1142                 if tbl_dict.get(tst_name_mod, None) is None:
1143                     name = tst_data[u'name'].rsplit(u'-', 1)[0]
1144                     if u"across testbeds" in table[u"title"].lower() or \
1145                             u"across topologies" in table[u"title"].lower():
1146                         name = _tpc_modify_displayed_test_name(name)
1147                     tbl_dict[tst_name_mod] = {
1148                         u"name": name,
1149                         u"replace-ref": True,
1150                         u"replace-cmp": True,
1151                         u"ref-data": list(),
1152                         u"cmp-data": list()
1153                     }
1154                 _tpc_insert_data(
1155                     target=tbl_dict[tst_name_mod][u"ref-data"],
1156                     src=tst_data,
1157                     include_tests=table[u"include-tests"]
1158                 )
1159
1160     replacement = table[u"reference"].get(u"data-replacement", None)
1161     if replacement:
1162         rpl_data = input_data.filter_data(
1163             table, data=replacement, continue_on_error=True)
1164         for job, builds in replacement.items():
1165             for build in builds:
1166                 for tst_name, tst_data in rpl_data[job][str(build)].items():
1167                     if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1168                         continue
1169                     tst_name_mod = \
1170                         _tpc_modify_test_name(tst_name, ignore_nic=True)
1171                     if (u"across topologies" in table[u"title"].lower() or
1172                             (u" 3n-" in table[u"title"].lower() and
1173                              u" 2n-" in table[u"title"].lower())):
1174                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1175                     if tbl_dict.get(tst_name_mod, None) is None:
1176                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
1177                         if u"across testbeds" in table[u"title"].lower() or \
1178                                 u"across topologies" in table[u"title"].lower():
1179                             name = _tpc_modify_displayed_test_name(name)
1180                         tbl_dict[tst_name_mod] = {
1181                             u"name": name,
1182                             u"replace-ref": False,
1183                             u"replace-cmp": True,
1184                             u"ref-data": list(),
1185                             u"cmp-data": list()
1186                         }
1187                     if tbl_dict[tst_name_mod][u"replace-ref"]:
1188                         tbl_dict[tst_name_mod][u"replace-ref"] = False
1189                         tbl_dict[tst_name_mod][u"ref-data"] = list()
1190
1191                     _tpc_insert_data(
1192                         target=tbl_dict[tst_name_mod][u"ref-data"],
1193                         src=tst_data,
1194                         include_tests=table[u"include-tests"]
1195                     )
1196
1197     for job, builds in table[u"compare"][u"data"].items():
1198         for build in builds:
1199             for tst_name, tst_data in data[job][str(build)].items():
1200                 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1201                     continue
1202                 tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
1203                 if (u"across topologies" in table[u"title"].lower() or
1204                         (u" 3n-" in table[u"title"].lower() and
1205                          u" 2n-" in table[u"title"].lower())):
1206                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1207                 if tbl_dict.get(tst_name_mod, None) is None:
1208                     name = tst_data[u'name'].rsplit(u'-', 1)[0]
1209                     if u"across testbeds" in table[u"title"].lower() or \
1210                             u"across topologies" in table[u"title"].lower():
1211                         name = _tpc_modify_displayed_test_name(name)
1212                     tbl_dict[tst_name_mod] = {
1213                         u"name": name,
1214                         u"replace-ref": False,
1215                         u"replace-cmp": True,
1216                         u"ref-data": list(),
1217                         u"cmp-data": list()
1218                     }
1219                 _tpc_insert_data(
1220                     target=tbl_dict[tst_name_mod][u"cmp-data"],
1221                     src=tst_data,
1222                     include_tests=table[u"include-tests"]
1223                 )
1224
1225     replacement = table[u"compare"].get(u"data-replacement", None)
1226     if replacement:
1227         rpl_data = input_data.filter_data(
1228             table, data=replacement, continue_on_error=True)
1229         for job, builds in replacement.items():
1230             for build in builds:
1231                 for tst_name, tst_data in rpl_data[job][str(build)].items():
1232                     if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1233                         continue
1234                     tst_name_mod = \
1235                         _tpc_modify_test_name(tst_name, ignore_nic=True)
1236                     if (u"across topologies" in table[u"title"].lower() or
1237                             (u" 3n-" in table[u"title"].lower() and
1238                              u" 2n-" in table[u"title"].lower())):
1239                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1240                     if tbl_dict.get(tst_name_mod, None) is None:
1241                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
1242                         if u"across testbeds" in table[u"title"].lower() or \
1243                                 u"across topologies" in table[u"title"].lower():
1244                             name = _tpc_modify_displayed_test_name(name)
1245                         tbl_dict[tst_name_mod] = {
1246                             u"name": name,
1247                             u"replace-ref": False,
1248                             u"replace-cmp": False,
1249                             u"ref-data": list(),
1250                             u"cmp-data": list()
1251                         }
1252                     if tbl_dict[tst_name_mod][u"replace-cmp"]:
1253                         tbl_dict[tst_name_mod][u"replace-cmp"] = False
1254                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
1255
1256                     _tpc_insert_data(
1257                         target=tbl_dict[tst_name_mod][u"cmp-data"],
1258                         src=tst_data,
1259                         include_tests=table[u"include-tests"]
1260                     )
1261
1262     for item in history:
1263         for job, builds in item[u"data"].items():
1264             for build in builds:
1265                 for tst_name, tst_data in data[job][str(build)].items():
1266                     if item[u"nic"] not in tst_data[u"tags"]:
1267                         continue
1268                     tst_name_mod = \
1269                         _tpc_modify_test_name(tst_name, ignore_nic=True)
1270                     if (u"across topologies" in table[u"title"].lower() or
1271                             (u" 3n-" in table[u"title"].lower() and
1272                              u" 2n-" in table[u"title"].lower())):
1273                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1274                     if tbl_dict.get(tst_name_mod, None) is None:
1275                         continue
1276                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
1277                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1278                     if tbl_dict[tst_name_mod][u"history"].\
1279                             get(item[u"title"], None) is None:
1280                         tbl_dict[tst_name_mod][u"history"][item[
1281                             u"title"]] = list()
1282                     try:
1283                         if table[u"include-tests"] == u"MRR":
1284                             res = (tst_data[u"result"][u"receive-rate"],
1285                                    tst_data[u"result"][u"receive-stdev"])
1286                         elif table[u"include-tests"] == u"PDR":
1287                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1288                         elif table[u"include-tests"] == u"NDR":
1289                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1290                         else:
1291                             continue
1292                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1293                             append(res)
1294                     except (TypeError, KeyError):
1295                         pass
1296
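    # Build the table rows. Each row is
    #   [name, <history Avg/Stdev pairs>, ref Avg, ref Stdev,
    #    cmp Avg, cmp Stdev, Diff, Stdev(Diff)]
    # with an RCA reference prepended when RCA data is available; throughput
    # values are converted to Mpps and u"NT" marks tests without data.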
1297     tbl_lst = list()
1298     for tst_name in tbl_dict:
1299         item = [tbl_dict[tst_name][u"name"], ]
1300         if history:
1301             if tbl_dict[tst_name].get(u"history", None) is not None:
1302                 for hist_data in tbl_dict[tst_name][u"history"].values():
1303                     if hist_data:
1304                         if table[u"include-tests"] == u"MRR":
1305                             item.append(round(hist_data[0][0] / 1e6, 1))
1306                             item.append(round(hist_data[0][1] / 1e6, 1))
1307                         else:
1308                             item.append(round(mean(hist_data) / 1e6, 1))
1309                             item.append(round(stdev(hist_data) / 1e6, 1))
1310                     else:
1311                         item.extend([u"NT", u"NT"])
1312             else:
1313                 item.extend([u"NT", u"NT"])
1314         data_r = tbl_dict[tst_name][u"ref-data"]
1315         if data_r:
1316             if table[u"include-tests"] == u"MRR":
1317                 data_r_mean = data_r[0][0]
1318                 data_r_stdev = data_r[0][1]
1319             else:
1320                 data_r_mean = mean(data_r)
1321                 data_r_stdev = stdev(data_r)
1322             item.append(round(data_r_mean / 1e6, 1))
1323             item.append(round(data_r_stdev / 1e6, 1))
1324         else:
1325             data_r_mean = None
1326             data_r_stdev = None
1327             item.extend([u"NT", u"NT"])
1328         data_c = tbl_dict[tst_name][u"cmp-data"]
1329         if data_c:
1330             if table[u"include-tests"] == u"MRR":
1331                 data_c_mean = data_c[0][0]
1332                 data_c_stdev = data_c[0][1]
1333             else:
1334                 data_c_mean = mean(data_c)
1335                 data_c_stdev = stdev(data_c)
1336             item.append(round(data_c_mean / 1e6, 1))
1337             item.append(round(data_c_stdev / 1e6, 1))
1338         else:
1339             data_c_mean = None
1340             data_c_stdev = None
1341             item.extend([u"NT", u"NT"])
1342         if item[-2] == u"NT":
1343             pass
1344         elif item[-4] == u"NT":
1345             item.append(u"New in CSIT-2001")
1346             item.append(u"New in CSIT-2001")
1347         elif data_r_mean is not None and data_c_mean is not None:
1348             delta, d_stdev = relative_change_stdev(
1349                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1350             )
1351             try:
1352                 item.append(round(delta))
1353             except ValueError:
1354                 item.append(delta)
1355             try:
1356                 item.append(round(d_stdev))
1357             except ValueError:
1358                 item.append(d_stdev)
1359         if rca_data:
1360             rca_nr = rca_data.get(item[0], u"-")
1361             item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1362         if (len(item) == len(header)) and (item[-4] != u"NT"):
1363             tbl_lst.append(item)
1364
1365     tbl_lst = _tpc_sort_table(tbl_lst)
1366
1367     # Generate csv tables:
1368     csv_file = f"{table[u'output-file']}.csv"
1369     with open(csv_file, u"wt") as file_handler:
1370         file_handler.write(header_str)
1371         for test in tbl_lst:
1372             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1373
1374     txt_file_name = f"{table[u'output-file']}.txt"
1375     convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
1376
1377     footnote = u""
1378     with open(txt_file_name, u'a') as txt_file:
1379         txt_file.write(legend)
1380         if rca_data:
1381             footnote = rca_data.get(u"footnote", u"")
1382             if footnote:
1383                 txt_file.write(f"\n{footnote}")
1384         txt_file.write(u"\n:END")
1385
1386     # Generate html table:
1387     _tpc_generate_html_table(
1388         header,
1389         tbl_lst,
1390         table[u'output-file'],
1391         legend=legend,
1392         footnote=footnote,
1393         title=table.get(u"title", u"")
1394     )
1395
1396
1397 def table_nics_comparison(table, input_data):
1398     """Generate the table(s) with algorithm: table_nics_comparison
1399     specified in the specification file.
1400
1401     :param table: Table to generate.
1402     :param input_data: Data to process.
1403     :type table: pandas.Series
1404     :type input_data: InputData
1405     """
1406
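    # Illustrative sketch of the specification entry this function consumes
    # (key names are taken from the code below; the job, build, title and NIC
    # values are made up):
    #
    #   algorithm: table_nics_comparison
    #   include-tests: NDR              # or PDR, MRR
    #   reference:
    #     title: x710
    #     nic: NIC_Intel-X710
    #   compare:
    #     title: xxv710
    #     nic: NIC_Intel-XXV710
    #   data:
    #     vpp-perf-report-iterative: [10, 11, 12]
    #   output-file: path/to/table-nics-comparison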
1407     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1408
1409     # Transform the data
1410     logging.info(
1411         f"    Creating the data set for the {table.get(u'type', u'')} "
1412         f"{table.get(u'title', u'')}."
1413     )
1414     data = input_data.filter_data(table, continue_on_error=True)
1415
1416     # Prepare the header of the tables
1417     try:
1418         header = [
1419             u"Test Case",
1420             f"{table[u'reference'][u'title']} "
1421             f"Avg({table[u'include-tests']})",
1422             f"{table[u'reference'][u'title']} "
1423             f"Stdev({table[u'include-tests']})",
1424             f"{table[u'compare'][u'title']} "
1425             f"Avg({table[u'include-tests']})",
1426             f"{table[u'compare'][u'title']} "
1427             f"Stdev({table[u'include-tests']})",
1428             f"Diff({table[u'reference'][u'title']},"
1429             f"{table[u'compare'][u'title']})",
1430             u"Stdev(Diff)"
1431         ]
1432         legend = (
1433             u"\nLegend:\n"
1434             f"{table[u'reference'][u'title']} "
1435             f"Avg({table[u'include-tests']}): "
1436             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1437             f"series of runs of the listed tests executed using "
1438             f"{table[u'reference'][u'title']} NIC.\n"
1439             f"{table[u'reference'][u'title']} "
1440             f"Stdev({table[u'include-tests']}): "
1441             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1442             f"computed from a series of runs of the listed tests executed "
1443             f"using {table[u'reference'][u'title']} NIC.\n"
1444             f"{table[u'compare'][u'title']} "
1445             f"Avg({table[u'include-tests']}): "
1446             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1447             f"series of runs of the listed tests executed using "
1448             f"{table[u'compare'][u'title']} NIC.\n"
1449             f"{table[u'compare'][u'title']} "
1450             f"Stdev({table[u'include-tests']}): "
1451             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1452             f"computed from a series of runs of the listed tests executed "
1453             f"using {table[u'compare'][u'title']} NIC.\n"
1454             f"Diff({table[u'reference'][u'title']},"
1455             f"{table[u'compare'][u'title']}): "
1456             f"Percentage change calculated for mean values.\n"
1457             u"Stdev(Diff): "
1458             u"Standard deviation of percentage change calculated for mean "
1459             u"values.\n"
1460             u":END"
1461         )
1462
1463     except (AttributeError, KeyError) as err:
1464         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1465         return
1466
1467     # Prepare data to the table:
1468     tbl_dict = dict()
1469     for job, builds in table[u"data"].items():
1470         for build in builds:
1471             for tst_name, tst_data in data[job][str(build)].items():
1472                 tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
1473                 if tbl_dict.get(tst_name_mod, None) is None:
1474                     name = tst_data[u'name'].rsplit(u'-', 1)[0]
1475                     tbl_dict[tst_name_mod] = {
1476                         u"name": name,
1477                         u"ref-data": list(),
1478                         u"cmp-data": list()
1479                     }
1480                 try:
1481                     if table[u"include-tests"] == u"MRR":
1482                         result = (tst_data[u"result"][u"receive-rate"],
1483                                   tst_data[u"result"][u"receive-stdev"])
1484                     elif table[u"include-tests"] == u"PDR":
1485                         result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1486                     elif table[u"include-tests"] == u"NDR":
1487                         result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1488                     else:
1489                         continue
1490
1491                     if result and \
1492                             table[u"reference"][u"nic"] in tst_data[u"tags"]:
1493                         tbl_dict[tst_name_mod][u"ref-data"].append(result)
1494                     elif result and \
1495                             table[u"compare"][u"nic"] in tst_data[u"tags"]:
1496                         tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1497                 except (TypeError, KeyError) as err:
1498                     logging.debug(f"No data for {tst_name}\n{repr(err)}")
1499                     # No data in output.xml for this test
1500
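    # Build rows: [name, ref Avg, ref Stdev, cmp Avg, cmp Stdev, Diff,
    # Stdev(Diff)]. Per the legend, Diff is the percentage change of the
    # compare mean against the reference mean, e.g. (illustrative numbers)
    # ref 10.0 Mpps and cmp 11.0 Mpps give Diff = +10.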
1501     tbl_lst = list()
1502     for tst_name in tbl_dict:
1503         item = [tbl_dict[tst_name][u"name"], ]
1504         data_r = tbl_dict[tst_name][u"ref-data"]
1505         if data_r:
1506             if table[u"include-tests"] == u"MRR":
1507                 data_r_mean = data_r[0][0]
1508                 data_r_stdev = data_r[0][1]
1509             else:
1510                 data_r_mean = mean(data_r)
1511                 data_r_stdev = stdev(data_r)
1512             item.append(round(data_r_mean / 1e6, 1))
1513             item.append(round(data_r_stdev / 1e6, 1))
1514         else:
1515             data_r_mean = None
1516             data_r_stdev = None
1517             item.extend([None, None])
1518         data_c = tbl_dict[tst_name][u"cmp-data"]
1519         if data_c:
1520             if table[u"include-tests"] == u"MRR":
1521                 data_c_mean = data_c[0][0]
1522                 data_c_stdev = data_c[0][1]
1523             else:
1524                 data_c_mean = mean(data_c)
1525                 data_c_stdev = stdev(data_c)
1526             item.append(round(data_c_mean / 1e6, 1))
1527             item.append(round(data_c_stdev / 1e6, 1))
1528         else:
1529             data_c_mean = None
1530             data_c_stdev = None
1531             item.extend([None, None])
1532         if data_r_mean is not None and data_c_mean is not None:
1533             delta, d_stdev = relative_change_stdev(
1534                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1535             )
1536             try:
1537                 item.append(round(delta))
1538             except ValueError:
1539                 item.append(delta)
1540             try:
1541                 item.append(round(d_stdev))
1542             except ValueError:
1543                 item.append(d_stdev)
1544             tbl_lst.append(item)
1545
1546     # Sort the table according to the relative change
1547     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1548
1549     # Generate csv tables:
1550     with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1551         file_handler.write(u";".join(header) + u"\n")
1552         for test in tbl_lst:
1553             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1554
1555     convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1556                               f"{table[u'output-file']}.txt",
1557                               delimiter=u";")
1558
1559     with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1560         txt_file.write(legend)
1561
1562     # Generate html table:
1563     _tpc_generate_html_table(
1564         header,
1565         tbl_lst,
1566         table[u'output-file'],
1567         legend=legend,
1568         title=table.get(u"title", u"")
1569     )
1570
1571
1572 def table_soak_vs_ndr(table, input_data):
1573     """Generate the table(s) with algorithm: table_soak_vs_ndr
1574     specified in the specification file.
1575
1576     :param table: Table to generate.
1577     :param input_data: Data to process.
1578     :type table: pandas.Series
1579     :type input_data: InputData
1580     """
1581
1582     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1583
1584     # Transform the data
1585     logging.info(
1586         f"    Creating the data set for the {table.get(u'type', u'')} "
1587         f"{table.get(u'title', u'')}."
1588     )
1589     data = input_data.filter_data(table, continue_on_error=True)
1590
1591     # Prepare the header of the table
1592     try:
1593         header = [
1594             u"Test Case",
1595             f"Avg({table[u'reference'][u'title']})",
1596             f"Stdev({table[u'reference'][u'title']})",
1597             f"Avg({table[u'compare'][u'title']})",
1598             f"Stdev({table[u'compare'][u'title']})",
1599             u"Diff",
1600             u"Stdev(Diff)"
1601         ]
1602         header_str = u";".join(header) + u"\n"
1603         legend = (
1604             u"\nLegend:\n"
1605             f"Avg({table[u'reference'][u'title']}): "
1606             f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
1607             f"from a series of runs of the listed tests.\n"
1608             f"Stdev({table[u'reference'][u'title']}): "
1609             f"Standard deviation value of {table[u'reference'][u'title']} "
1610             f"[Mpps] computed from a series of runs of the listed tests.\n"
1611             f"Avg({table[u'compare'][u'title']}): "
1612             f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
1613             f"a series of runs of the listed tests.\n"
1614             f"Stdev({table[u'compare'][u'title']}): "
1615             f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
1616             f"computed from a series of runs of the listed tests.\n"
1617             f"Diff({table[u'reference'][u'title']},"
1618             f"{table[u'compare'][u'title']}): "
1619             f"Percentage change calculated for mean values.\n"
1620             u"Stdev(Diff): "
1621             u"Standard deviation of percentage change calculated for mean "
1622             u"values.\n"
1623             u":END"
1624         )
1625     except (AttributeError, KeyError) as err:
1626         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1627         return
1628
1629     # Create a list of available SOAK test results:
1630     tbl_dict = dict()
1631     for job, builds in table[u"compare"][u"data"].items():
1632         for build in builds:
1633             for tst_name, tst_data in data[job][str(build)].items():
1634                 if tst_data[u"type"] == u"SOAK":
1635                     tst_name_mod = tst_name.replace(u"-soak", u"")
1636                     if tbl_dict.get(tst_name_mod, None) is None:
1637                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
1638                         nic = groups.group(0) if groups else u""
1639                         name = (
1640                             f"{nic}-"
1641                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1642                         )
1643                         tbl_dict[tst_name_mod] = {
1644                             u"name": name,
1645                             u"ref-data": list(),
1646                             u"cmp-data": list()
1647                         }
1648                     try:
1649                         tbl_dict[tst_name_mod][u"cmp-data"].append(
1650                             tst_data[u"throughput"][u"LOWER"])
1651                     except (KeyError, TypeError):
1652                         pass
1653     tests_lst = tbl_dict.keys()
1654
1655     # Add corresponding NDR test results:
1656     for job, builds in table[u"reference"][u"data"].items():
1657         for build in builds:
1658             for tst_name, tst_data in data[job][str(build)].items():
1659                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1660                     replace(u"-mrr", u"")
1661                 if tst_name_mod not in tests_lst:
1662                     continue
1663                 try:
1664                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1665                         continue
1666                     if table[u"include-tests"] == u"MRR":
1667                         result = (tst_data[u"result"][u"receive-rate"],
1668                                   tst_data[u"result"][u"receive-stdev"])
1669                     elif table[u"include-tests"] == u"PDR":
1670                         result = \
1671                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
1672                     elif table[u"include-tests"] == u"NDR":
1673                         result = \
1674                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
1675                     else:
1676                         result = None
1677                     if result is not None:
1678                         tbl_dict[tst_name_mod][u"ref-data"].append(
1679                             result)
1680                 except (KeyError, TypeError):
1681                     continue
1682
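    # Build rows comparing the NDR/PDR/MRR reference results against the soak
    # test lower-bound throughput collected above; the row layout follows the
    # header: [name, ref Avg, ref Stdev, cmp Avg, cmp Stdev, Diff,
    # Stdev(Diff)].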
1683     tbl_lst = list()
1684     for tst_name in tbl_dict:
1685         item = [tbl_dict[tst_name][u"name"], ]
1686         data_r = tbl_dict[tst_name][u"ref-data"]
1687         if data_r:
1688             if table[u"include-tests"] == u"MRR":
1689                 data_r_mean = data_r[0][0]
1690                 data_r_stdev = data_r[0][1]
1691             else:
1692                 data_r_mean = mean(data_r)
1693                 data_r_stdev = stdev(data_r)
1694             item.append(round(data_r_mean / 1e6, 1))
1695             item.append(round(data_r_stdev / 1e6, 1))
1696         else:
1697             data_r_mean = None
1698             data_r_stdev = None
1699             item.extend([None, None])
1700         data_c = tbl_dict[tst_name][u"cmp-data"]
1701         if data_c:
1702             if table[u"include-tests"] == u"MRR":
1703                 data_c_mean = data_c[0][0]
1704                 data_c_stdev = data_c[0][1]
1705             else:
1706                 data_c_mean = mean(data_c)
1707                 data_c_stdev = stdev(data_c)
1708             item.append(round(data_c_mean / 1e6, 1))
1709             item.append(round(data_c_stdev / 1e6, 1))
1710         else:
1711             data_c_mean = None
1712             data_c_stdev = None
1713             item.extend([None, None])
1714         if data_r_mean is not None and data_c_mean is not None:
1715             delta, d_stdev = relative_change_stdev(
1716                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1717             try:
1718                 item.append(round(delta))
1719             except ValueError:
1720                 item.append(delta)
1721             try:
1722                 item.append(round(d_stdev))
1723             except ValueError:
1724                 item.append(d_stdev)
1725             tbl_lst.append(item)
1726
1727     # Sort the table according to the relative change
1728     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1729
1730     # Generate csv tables:
1731     csv_file = f"{table[u'output-file']}.csv"
1732     with open(csv_file, u"wt") as file_handler:
1733         file_handler.write(header_str)
1734         for test in tbl_lst:
1735             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1736
1737     convert_csv_to_pretty_txt(
1738         csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
1739     )
1740     with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1741         txt_file.write(legend)
1742
1743     # Generate html table:
1744     _tpc_generate_html_table(
1745         header,
1746         tbl_lst,
1747         table[u'output-file'],
1748         legend=legend,
1749         title=table.get(u"title", u"")
1750     )
1751
1752
1753 def table_perf_trending_dash(table, input_data):
1754     """Generate the table(s) with algorithm:
1755     table_perf_trending_dash
1756     specified in the specification file.
1757
1758     :param table: Table to generate.
1759     :param input_data: Data to process.
1760     :type table: pandas.Series
1761     :type input_data: InputData
1762     """
1763
1764     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1765
1766     # Transform the data
1767     logging.info(
1768         f"    Creating the data set for the {table.get(u'type', u'')} "
1769         f"{table.get(u'title', u'')}."
1770     )
1771     data = input_data.filter_data(table, continue_on_error=True)
1772
1773     # Prepare the header of the tables
1774     header = [
1775         u"Test Case",
1776         u"Trend [Mpps]",
1777         u"Short-Term Change [%]",
1778         u"Long-Term Change [%]",
1779         u"Regressions [#]",
1780         u"Progressions [#]"
1781     ]
1782     header_str = u",".join(header) + u"\n"
1783
1784     # Prepare data to the table:
1785     tbl_dict = dict()
1786     for job, builds in table[u"data"].items():
1787         for build in builds:
1788             for tst_name, tst_data in data[job][str(build)].items():
1789                 if tst_name.lower() in table.get(u"ignore-list", list()):
1790                     continue
1791                 if tbl_dict.get(tst_name, None) is None:
1792                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1793                     if not groups:
1794                         continue
1795                     nic = groups.group(0)
1796                     tbl_dict[tst_name] = {
1797                         u"name": f"{nic}-{tst_data[u'name']}",
1798                         u"data": OrderedDict()
1799                     }
1800                 try:
1801                     tbl_dict[tst_name][u"data"][str(build)] = \
1802                         tst_data[u"result"][u"receive-rate"]
1803                 except (TypeError, KeyError):
1804                     pass  # No data in output.xml for this test
1805
1806     tbl_lst = list()
1807     for tst_name in tbl_dict:
1808         data_t = tbl_dict[tst_name][u"data"]
1809         if len(data_t) < 2:
1810             continue
1811
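        # classify_anomalies() is assumed to return a per-sample
        # classification list (values such as u"regression" and
        # u"progression") together with the corresponding trend averages;
        # both are used below for the change columns and anomaly counts.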
1812         classification_lst, avgs = classify_anomalies(data_t)
1813
1814         win_size = min(len(data_t), table[u"window"])
1815         long_win_size = min(len(data_t), table[u"long-trend-window"])
1816
1817         try:
1818             max_long_avg = max(
1819                 [x for x in avgs[-long_win_size:-win_size]
1820                  if not isnan(x)])
1821         except ValueError:
1822             max_long_avg = nan
1823         last_avg = avgs[-1]
1824         avg_week_ago = avgs[max(-win_size, -len(avgs))]
1825
1826         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1827             rel_change_last = nan
1828         else:
1829             rel_change_last = round(
1830                 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
1831
1832         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1833             rel_change_long = nan
1834         else:
1835             rel_change_long = round(
1836                 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
1837
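        # Illustrative numbers: last_avg = 12.0e6 and avg_week_ago = 11.4e6
        # give rel_change_last = round((12.0e6 - 11.4e6) / 11.4e6 * 1e2, 2)
        # = 5.26 [%].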
1838         if classification_lst:
1839             if isnan(rel_change_last) and isnan(rel_change_long):
1840                 continue
1841             if isnan(last_avg) or isnan(rel_change_last) or \
1842                     isnan(rel_change_long):
1843                 continue
1844             tbl_lst.append(
1845                 [tbl_dict[tst_name][u"name"],
1846                  round(last_avg / 1e6, 2),
1847                  rel_change_last,
1848                  rel_change_long,
1849                  classification_lst[-win_size:].count(u"regression"),
1850                  classification_lst[-win_size:].count(u"progression")])
1851
1852     tbl_lst.sort(key=lambda rel: rel[0])
1853
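    # Group the rows by the number of regressions (descending), within each
    # group by the number of progressions (descending), and sort each group
    # by the short-term change (ascending).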
1854     tbl_sorted = list()
1855     for nrr in range(table[u"window"], -1, -1):
1856         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1857         for nrp in range(table[u"window"], -1, -1):
1858             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1859             tbl_out.sort(key=lambda rel: rel[2])
1860             tbl_sorted.extend(tbl_out)
1861
1862     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1863
1864     logging.info(f"    Writing file: {file_name}")
1865     with open(file_name, u"wt") as file_handler:
1866         file_handler.write(header_str)
1867         for test in tbl_sorted:
1868             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1869
1870     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1871     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1872
1873
1874 def _generate_url(testbed, test_name):
1875     """Generate URL to a trending plot from the name of the test case.
1876
1877     :param testbed: The testbed used for testing.
1878     :param test_name: The name of the test case.
1879     :type testbed: str
1880     :type test_name: str
1881     :returns: The URL to the plot with the trending data for the given test
1882         case.
1883     :rtype: str
1884     """
1885
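    # Illustrative example with a made-up test name: for testbed u"2n-skx"
    # and test_name u"x710-64b-2t1c-avf-ethip4-ip4base-ndrpdr" the mapping
    # below yields u"ip4-2n-skx-x710.html#64b-2t1c-base-avf".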
1886     if u"x520" in test_name:
1887         nic = u"x520"
1888     elif u"x710" in test_name:
1889         nic = u"x710"
1890     elif u"xl710" in test_name:
1891         nic = u"xl710"
1892     elif u"xxv710" in test_name:
1893         nic = u"xxv710"
1894     elif u"vic1227" in test_name:
1895         nic = u"vic1227"
1896     elif u"vic1385" in test_name:
1897         nic = u"vic1385"
1898     elif u"x553" in test_name:
1899         nic = u"x553"
1900     elif u"cx556" in test_name or u"cx556a" in test_name:
1901         nic = u"cx556a"
1902     else:
1903         nic = u""
1904
1905     if u"64b" in test_name:
1906         frame_size = u"64b"
1907     elif u"78b" in test_name:
1908         frame_size = u"78b"
1909     elif u"imix" in test_name:
1910         frame_size = u"imix"
1911     elif u"9000b" in test_name:
1912         frame_size = u"9000b"
1913     elif u"1518b" in test_name:
1914         frame_size = u"1518b"
1915     elif u"114b" in test_name:
1916         frame_size = u"114b"
1917     else:
1918         frame_size = u""
1919
1920     if u"1t1c" in test_name or \
1921         (u"-1c-" in test_name and
1922          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1923         cores = u"1t1c"
1924     elif u"2t2c" in test_name or \
1925          (u"-2c-" in test_name and
1926           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1927         cores = u"2t2c"
1928     elif u"4t4c" in test_name or \
1929          (u"-4c-" in test_name and
1930           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1931         cores = u"4t4c"
1932     elif u"2t1c" in test_name or \
1933          (u"-1c-" in test_name and
1934           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1935         cores = u"2t1c"
1936     elif u"4t2c" in test_name or \
1937          (u"-2c-" in test_name and
1938           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1939         cores = u"4t2c"
1940     elif u"8t4c" in test_name or \
1941          (u"-4c-" in test_name and
1942           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1943         cores = u"8t4c"
1944     else:
1945         cores = u""
1946
1947     if u"testpmd" in test_name:
1948         driver = u"testpmd"
1949     elif u"l3fwd" in test_name:
1950         driver = u"l3fwd"
1951     elif u"avf" in test_name:
1952         driver = u"avf"
1953     elif u"rdma" in test_name:
1954         driver = u"rdma"
1955     elif u"dnv" in testbed or u"tsh" in testbed:
1956         driver = u"ixgbe"
1957     else:
1958         driver = u"dpdk"
1959
1960     if u"acl" in test_name or \
1961             u"macip" in test_name or \
1962             u"nat" in test_name or \
1963             u"policer" in test_name or \
1964             u"cop" in test_name:
1965         bsf = u"features"
1966     elif u"scale" in test_name:
1967         bsf = u"scale"
1968     elif u"base" in test_name:
1969         bsf = u"base"
1970     else:
1971         bsf = u"base"
1972
1973     if u"114b" in test_name and u"vhost" in test_name:
1974         domain = u"vts"
1975     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1976         domain = u"dpdk"
1977     elif u"memif" in test_name:
1978         domain = u"container_memif"
1979     elif u"srv6" in test_name:
1980         domain = u"srv6"
1981     elif u"vhost" in test_name:
1982         domain = u"vhost"
1983         if u"vppl2xc" in test_name:
1984             driver += u"-vpp"
1985         else:
1986             driver += u"-testpmd"
1987         if u"lbvpplacp" in test_name:
1988             bsf += u"-link-bonding"
1989     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1990         domain = u"nf_service_density_vnfc"
1991     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1992         domain = u"nf_service_density_cnfc"
1993     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1994         domain = u"nf_service_density_cnfp"
1995     elif u"ipsec" in test_name:
1996         domain = u"ipsec"
1997         if u"sw" in test_name:
1998             bsf += u"-sw"
1999         elif u"hw" in test_name:
2000             bsf += u"-hw"
2001     elif u"ethip4vxlan" in test_name:
2002         domain = u"ip4_tunnels"
2003     elif u"ip4base" in test_name or u"ip4scale" in test_name:
2004         domain = u"ip4"
2005     elif u"ip6base" in test_name or u"ip6scale" in test_name:
2006         domain = u"ip6"
2007     elif u"l2xcbase" in test_name or \
2008             u"l2xcscale" in test_name or \
2009             u"l2bdbasemaclrn" in test_name or \
2010             u"l2bdscale" in test_name or \
2011             u"l2patch" in test_name:
2012         domain = u"l2"
2013     else:
2014         domain = u""
2015
2016     file_name = u"-".join((domain, testbed, nic)) + u".html#"
2017     anchor_name = u"-".join((frame_size, cores, bsf, driver))
2018
2019     return file_name + anchor_name
2020
2021
2022 def table_perf_trending_dash_html(table, input_data):
2023     """Generate the table(s) with algorithm:
2024     table_perf_trending_dash_html specified in the specification
2025     file.
2026
2027     :param table: Table to generate.
2028     :param input_data: Data to process.
2029     :type table: dict
2030     :type input_data: InputData
2031     """
2032
2033     _ = input_data
2034
2035     if not table.get(u"testbed", None):
2036         logging.error(
2037             f"The testbed is not defined for the table "
2038             f"{table.get(u'title', u'')}."
2039         )
2040         return
2041
2042     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2043
2044     try:
2045         with open(table[u"input-file"], u'rt') as csv_file:
2046             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2047     except KeyError:
2048         logging.warning(u"The input file is not defined.")
2049         return
2050     except csv.Error as err:
2051         logging.warning(
2052             f"Not possible to process the file {table[u'input-file']}.\n"
2053             f"{repr(err)}"
2054         )
2055         return
2056
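    # The input CSV is expected to be the dashboard written by
    # table_perf_trending_dash: the first row is the header, column 5 holds
    # the number of regressions and column 6 the number of progressions;
    # these two counts drive the row colouring below.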
2057     # Table:
2058     dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2059
2060     # Table header:
2061     trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2062     for idx, item in enumerate(csv_lst[0]):
2063         alignment = u"left" if idx == 0 else u"center"
2064         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
2065         thead.text = item
2066
2067     # Rows:
2068     colors = {
2069         u"regression": (
2070             u"#ffcccc",
2071             u"#ff9999"
2072         ),
2073         u"progression": (
2074             u"#c6ecc6",
2075             u"#9fdf9f"
2076         ),
2077         u"normal": (
2078             u"#e9f1fb",
2079             u"#d4e4f7"
2080         )
2081     }
2082     for r_idx, row in enumerate(csv_lst[1:]):
2083         if int(row[4]):
2084             color = u"regression"
2085         elif int(row[5]):
2086             color = u"progression"
2087         else:
2088             color = u"normal"
2089         trow = ET.SubElement(
2090             dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
2091         )
2092
2093         # Columns:
2094         for c_idx, item in enumerate(row):
2095             tdata = ET.SubElement(
2096                 trow,
2097                 u"td",
2098                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
2099             )
2100             # Name:
2101             if c_idx == 0:
2102                 ref = ET.SubElement(
2103                     tdata,
2104                     u"a",
2105                     attrib=dict(
2106                         href=f"../trending/"
2107                              f"{_generate_url(table.get(u'testbed', ''), item)}"
2108                     )
2109                 )
2110                 ref.text = item
2111             else:
2112                 tdata.text = item
2113     try:
2114         with open(table[u"output-file"], u'w') as html_file:
2115             logging.info(f"    Writing file: {table[u'output-file']}")
2116             html_file.write(u".. raw:: html\n\n\t")
2117             html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
2118             html_file.write(u"\n\t<p><br><br></p>\n")
2119     except KeyError:
2120         logging.warning(u"The output file is not defined.")
2121         return
2122
2123
2124 def table_last_failed_tests(table, input_data):
2125     """Generate the table(s) with algorithm: table_last_failed_tests
2126     specified in the specification file.
2127
2128     :param table: Table to generate.
2129     :param input_data: Data to process.
2130     :type table: pandas.Series
2131     :type input_data: InputData
2132     """
2133
2134     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2135
2136     # Transform the data
2137     logging.info(
2138         f"    Creating the data set for the {table.get(u'type', u'')} "
2139         f"{table.get(u'title', u'')}."
2140     )
2141
2142     data = input_data.filter_data(table, continue_on_error=True)
2143
2144     if data is None or data.empty:
2145         logging.warning(
2146             f"    No data for the {table.get(u'type', u'')} "
2147             f"{table.get(u'title', u'')}."
2148         )
2149         return
2150
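    # The output is a flat text file: for every build it lists the build
    # number, the version string, the number of passed and of failed tests,
    # and then the names of the failed tests (NIC prefixed), one item per
    # line.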
2151     tbl_list = list()
2152     for job, builds in table[u"data"].items():
2153         for build in builds:
2154             build = str(build)
2155             try:
2156                 version = input_data.metadata(job, build).get(u"version", u"")
2157             except KeyError:
2158                 logging.error(f"Data for {job}: {build} is not present.")
2159                 return
2160             tbl_list.append(build)
2161             tbl_list.append(version)
2162             failed_tests = list()
2163             passed = 0
2164             failed = 0
2165             for tst_data in data[job][build].values:
2166                 if tst_data[u"status"] != u"FAIL":
2167                     passed += 1
2168                     continue
2169                 failed += 1
2170                 groups = re.search(REGEX_NIC, tst_data[u"parent"])
2171                 if not groups:
2172                     continue
2173                 nic = groups.group(0)
2174                 failed_tests.append(f"{nic}-{tst_data[u'name']}")
2175             tbl_list.append(str(passed))
2176             tbl_list.append(str(failed))
2177             tbl_list.extend(failed_tests)
2178
2179     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2180     logging.info(f"    Writing file: {file_name}")
2181     with open(file_name, u"wt") as file_handler:
2182         for test in tbl_list:
2183             file_handler.write(test + u'\n')
2184
2185
2186 def table_failed_tests(table, input_data):
2187     """Generate the table(s) with algorithm: table_failed_tests
2188     specified in the specification file.
2189
2190     :param table: Table to generate.
2191     :param input_data: Data to process.
2192     :type table: pandas.Series
2193     :type input_data: InputData
2194     """
2195
2196     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2197
2198     # Transform the data
2199     logging.info(
2200         f"    Creating the data set for the {table.get(u'type', u'')} "
2201         f"{table.get(u'title', u'')}."
2202     )
2203     data = input_data.filter_data(table, continue_on_error=True)
2204
2205     # Prepare the header of the tables
2206     header = [
2207         u"Test Case",
2208         u"Failures [#]",
2209         u"Last Failure [Time]",
2210         u"Last Failure [VPP-Build-Id]",
2211         u"Last Failure [CSIT-Job-Build-Id]"
2212     ]
2213
2214     # Generate the data for the table according to the model in the table
2215     # specification
2216
2217     now = dt.utcnow()
2218     timeperiod = timedelta(int(table.get(u"window", 7)))
2219
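    # Keep only results generated within the last "window" days (default 7).
    # The "generated" metadata is assumed to be a timestamp string such as
    # u"20200625 18:21", i.e. the "%Y%m%d %H:%M" format parsed below.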
2220     tbl_dict = dict()
2221     for job, builds in table[u"data"].items():
2222         for build in builds:
2223             build = str(build)
2224             for tst_name, tst_data in data[job][build].items():
2225                 if tst_name.lower() in table.get(u"ignore-list", list()):
2226                     continue
2227                 if tbl_dict.get(tst_name, None) is None:
2228                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
2229                     if not groups:
2230                         continue
2231                     nic = groups.group(0)
2232                     tbl_dict[tst_name] = {
2233                         u"name": f"{nic}-{tst_data[u'name']}",
2234                         u"data": OrderedDict()
2235                     }
2236                 try:
2237                     generated = input_data.metadata(job, build).\
2238                         get(u"generated", u"")
2239                     if not generated:
2240                         continue
2241                     then = dt.strptime(generated, u"%Y%m%d %H:%M")
2242                     if (now - then) <= timeperiod:
2243                         tbl_dict[tst_name][u"data"][build] = (
2244                             tst_data[u"status"],
2245                             generated,
2246                             input_data.metadata(job, build).get(u"version",
2247                                                                 u""),
2248                             build
2249                         )
2250                 except (TypeError, KeyError) as err:
2251                     logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
2252
2253     max_fails = 0
2254     tbl_lst = list()
2255     for tst_data in tbl_dict.values():
2256         fails_nr = 0
2257         fails_last_date = u""
2258         fails_last_vpp = u""
2259         fails_last_csit = u""
2260         for val in tst_data[u"data"].values():
2261             if val[0] == u"FAIL":
2262                 fails_nr += 1
2263                 fails_last_date = val[1]
2264                 fails_last_vpp = val[2]
2265                 fails_last_csit = val[3]
2266         if fails_nr:
2267             max_fails = fails_nr if fails_nr > max_fails else max_fails
2268             tbl_lst.append(
2269                 [
2270                     tst_data[u"name"],
2271                     fails_nr,
2272                     fails_last_date,
2273                     fails_last_vpp,
2274                     f"mrr-daily-build-{fails_last_csit}"
2275                 ]
2276             )
2277
2278     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
2279     tbl_sorted = list()
2280     for nrf in range(max_fails, -1, -1):
2281         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
2282         tbl_sorted.extend(tbl_fails)
2283
2284     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2285     logging.info(f"    Writing file: {file_name}")
2286     with open(file_name, u"wt") as file_handler:
2287         file_handler.write(u",".join(header) + u"\n")
2288         for test in tbl_sorted:
2289             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
2290
2291     logging.info(f"    Writing file: {table[u'output-file']}.txt")
2292     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
2293
2294
2295 def table_failed_tests_html(table, input_data):
2296     """Generate the table(s) with algorithm: table_failed_tests_html
2297     specified in the specification file.
2298
2299     :param table: Table to generate.
2300     :param input_data: Data to process.
2301     :type table: pandas.Series
2302     :type input_data: InputData
2303     """
2304
2305     _ = input_data
2306
2307     if not table.get(u"testbed", None):
2308         logging.error(
2309             f"The testbed is not defined for the table "
2310             f"{table.get(u'title', u'')}."
2311         )
2312         return
2313
2314     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2315
2316     try:
2317         with open(table[u"input-file"], u'rt') as csv_file:
2318             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2319     except KeyError:
2320         logging.warning(u"The input file is not defined.")
2321         return
2322     except csv.Error as err:
2323         logging.warning(
2324             f"Not possible to process the file {table[u'input-file']}.\n"
2325             f"{repr(err)}"
2326         )
2327         return
2328
2329     # Table:
2330     failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2331
2332     # Table header:
2333     trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2334     for idx, item in enumerate(csv_lst[0]):
2335         alignment = u"left" if idx == 0 else u"center"
2336         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
2337         thead.text = item
2338
2339     # Rows:
2340     colors = (u"#e9f1fb", u"#d4e4f7")
2341     for r_idx, row in enumerate(csv_lst[1:]):
2342         background = colors[r_idx % 2]
2343         trow = ET.SubElement(
2344             failed_tests, u"tr", attrib=dict(bgcolor=background)
2345         )
2346
2347         # Columns:
2348         for c_idx, item in enumerate(row):
2349             tdata = ET.SubElement(
2350                 trow,
2351                 u"td",
2352                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
2353             )
2354             # Name:
2355             if c_idx == 0:
2356                 ref = ET.SubElement(
2357                     tdata,
2358                     u"a",
2359                     attrib=dict(
2360                         href=f"../trending/"
2361                              f"{_generate_url(table.get(u'testbed', ''), item)}"
2362                     )
2363                 )
2364                 ref.text = item
2365             else:
2366                 tdata.text = item
2367     try:
2368         with open(table[u"output-file"], u'w') as html_file:
2369             logging.info(f"    Writing file: {table[u'output-file']}")
2370             html_file.write(u".. raw:: html\n\n\t")
2371             html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2372             html_file.write(u"\n\t<p><br><br></p>\n")
2373     except KeyError:
2374         logging.warning(u"The output file is not defined.")
2375         return
2376
2377
2378 def table_comparison(table, input_data):
2379     """Generate the table(s) with algorithm: table_comparison
2380     specified in the specification file.
2381
2382     :param table: Table to generate.
2383     :param input_data: Data to process.
2384     :type table: pandas.Series
2385     :type input_data: InputData
2386     """
2387     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2388
2389     # Transform the data
2390     logging.info(
2391         f"    Creating the data set for the {table.get(u'type', u'')} "
2392         f"{table.get(u'title', u'')}."
2393     )
2394
2395     columns = table.get(u"columns", None)
2396     if not columns:
2397         logging.error(
2398             f"No columns specified for {table.get(u'title', u'')}. Skipping."
2399         )
2400         return
2401
2402     cols = list()
2403     for idx, col in enumerate(columns):
2404         if col.get(u"data-set", None) is None:
2405             logging.warning(f"No data for column {col.get(u'title', u'')}")
2406             continue
2407         data = input_data.filter_data(
2408             table,
2409             params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2410             data=col[u"data-set"],
2411             continue_on_error=True
2412         )
2413         col_data = {
2414             u"title": col.get(u"title", f"Column{idx}"),
2415             u"data": dict()
2416         }
2417         for builds in data.values:
2418             for build in builds:
2419                 for tst_name, tst_data in build.items():
2420                     tst_name_mod = \
2421                         _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2422                     if col_data[u"data"].get(tst_name_mod, None) is None:
2423                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
2424                         if u"across testbeds" in table[u"title"].lower() or \
2425                                 u"across topologies" in table[u"title"].lower():
2426                             name = _tpc_modify_displayed_test_name(name)
2427                         col_data[u"data"][tst_name_mod] = {
2428                             u"name": name,
2429                             u"replace": True,
2430                             u"data": list(),
2431                             u"mean": None,
2432                             u"stdev": None
2433                         }
2434                     _tpc_insert_data(
2435                         target=col_data[u"data"][tst_name_mod][u"data"],
2436                         src=tst_data,
2437                         include_tests=table[u"include-tests"]
2438                     )
2439
2440         replacement = col.get(u"data-replacement", None)
2441         if replacement:
2442             rpl_data = input_data.filter_data(
2443                 table,
2444                 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2445                 data=replacement,
2446                 continue_on_error=True
2447             )
2448             for builds in rpl_data.values:
2449                 for build in builds:
2450                     for tst_name, tst_data in build.items():
2451                         tst_name_mod = \
2452                             _tpc_modify_test_name(tst_name).\
2453                             replace(u"2n1l-", u"")
2454                         if col_data[u"data"].get(tst_name_mod, None) is None:
2455                             name = tst_data[u'name'].rsplit(u'-', 1)[0]
2456                             if u"across testbeds" in table[u"title"].lower() \
2457                                     or u"across topologies" in \
2458                                     table[u"title"].lower():
2459                                 name = _tpc_modify_displayed_test_name(name)
2460                             col_data[u"data"][tst_name_mod] = {
2461                                 u"name": name,
2462                                 u"replace": False,
2463                                 u"data": list(),
2464                                 u"mean": None,
2465                                 u"stdev": None
2466                             }
2467                         if col_data[u"data"][tst_name_mod][u"replace"]:
2468                             col_data[u"data"][tst_name_mod][u"replace"] = False
2469                             col_data[u"data"][tst_name_mod][u"data"] = list()
2470                         _tpc_insert_data(
2471                             target=col_data[u"data"][tst_name_mod][u"data"],
2472                             src=tst_data,
2473                             include_tests=table[u"include-tests"]
2474                         )
2475
2476         if table[u"include-tests"] in (u"NDR", u"PDR"):
2477             for tst_name, tst_data in col_data[u"data"].items():
2478                 if tst_data[u"data"]:
2479                     tst_data[u"mean"] = mean(tst_data[u"data"])
2480                     tst_data[u"stdev"] = stdev(tst_data[u"data"])
2481         elif table[u"include-tests"] in (u"MRR", ):
2482             for tst_name, tst_data in col_data[u"data"].items():
2483                 if tst_data[u"data"]:
2484                     tst_data[u"mean"] = tst_data[u"data"][0]
2485                     tst_data[u"stdev"] = tst_data[u"data"][0]
2486
2487         cols.append(col_data)
2488
2489     tbl_dict = dict()
2490     for col in cols:
2491         for tst_name, tst_data in col[u"data"].items():
2492             if tbl_dict.get(tst_name, None) is None:
2493                 tbl_dict[tst_name] = {
2494                     "name": tst_data[u"name"]
2495                 }
2496             tbl_dict[tst_name][col[u"title"]] = {
2497                 u"mean": tst_data[u"mean"],
2498                 u"stdev": tst_data[u"stdev"]
2499             }
2500
2501     tbl_lst = list()
2502     for tst_data in tbl_dict.values():
2503         row = [tst_data[u"name"], ]
2504         for col in cols:
2505             row.append(tst_data.get(col[u"title"], None))
2506         tbl_lst.append(row)
2507
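    # Optional comparison columns. Each item of "comparisons" is expected to
    # carry 1-based column indices u"reference" and u"compare" (optionally
    # u"reference-alt", used when the reference value is missing, and
    # u"title" for the header); invalid items are dropped below.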
2508     comparisons = table.get(u"comparisons", None)
2509     if comparisons and isinstance(comparisons, list):
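        # Iterate over a copy so that removing an invalid item does not skip
        # the item following it.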
2510         for comp in list(comparisons):
2511             try:
2512                 col_ref = int(comp[u"reference"])
2513                 col_cmp = int(comp[u"compare"])
2514             except KeyError:
2515                 logging.warning(u"Comparison: No references defined! Skipping.")
2516                 comparisons.remove(comp)
2517                 continue
2518             if not (0 < col_ref <= len(cols) and
2519                     0 < col_cmp <= len(cols)) or \
2520                     col_ref == col_cmp:
2521                 logging.warning(f"Wrong values of reference={col_ref} "
2522                                 f"and/or compare={col_cmp}. Skipping.")
2523                 comparisons.remove(comp)
2524                 continue
2525
2526     tbl_cmp_lst = list()
2527     if comparisons:
2528         for row in tbl_lst:
2529             new_row = deepcopy(row)
2530             add_to_tbl = False
2531             for comp in comparisons:
2532                 ref_itm = row[int(comp[u"reference"])]
2533                 if ref_itm is None and \
2534                         comp.get(u"reference-alt", None) is not None:
2535                     ref_itm = row[int(comp[u"reference-alt"])]
2536                 cmp_itm = row[int(comp[u"compare"])]
2537                 if ref_itm is not None and cmp_itm is not None and \
2538                         ref_itm[u"mean"] is not None and \
2539                         cmp_itm[u"mean"] is not None and \
2540                         ref_itm[u"stdev"] is not None and \
2541                         cmp_itm[u"stdev"] is not None:
2542                     delta, d_stdev = relative_change_stdev(
2543                         ref_itm[u"mean"], cmp_itm[u"mean"],
2544                         ref_itm[u"stdev"], cmp_itm[u"stdev"]
2545                     )
2546                     new_row.append(
2547                         {
2548                             u"mean": delta * 1e6,
2549                             u"stdev": d_stdev * 1e6
2550                         }
2551                     )
2552                     add_to_tbl = True
2553                 else:
2554                     new_row.append(None)
2555             if add_to_tbl:
2556                 tbl_cmp_lst.append(new_row)
2557
2558     tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
2559     tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
2560
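    # Optional root cause analysis (RCA) columns: every "rca" item points to a
    # YAML file mapping test names to an RCA reference; the file may also
    # carry a "footnote" entry which is appended to the txt and html outputs.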
2561     rcas = list()
2562     rca_in = table.get(u"rca", None)
2563     if rca_in and isinstance(rca_in, list):
2564         for idx, itm in enumerate(rca_in):
2565             try:
2566                 with open(itm.get(u"data", u""), u"r") as rca_file:
2567                     rcas.append(
2568                         {
2569                             u"title": itm.get(u"title", f"RCA{idx}"),
2570                             u"data": load(rca_file, Loader=FullLoader)
2571                         }
2572                     )
2573             except (YAMLError, IOError) as err:
2574                 logging.warning(
2575                     f"The RCA file {itm.get(u'data', u'')} does not exist "
2576                     f"or is corrupted!"
2577                 )
2578                 logging.debug(repr(err))
2579
2580     tbl_for_csv = list()
2581     for line in tbl_cmp_lst:
2582         row = [line[0], ]
2583         for idx, itm in enumerate(line[1:]):
2584             if itm is None:
2585                 row.append(u"NT")
2586                 row.append(u"NT")
2587             else:
2588                 row.append(round(float(itm[u'mean']) / 1e6, 3))
2589                 row.append(round(float(itm[u'stdev']) / 1e6, 3))
2590         for rca in rcas:
2591             rca_nr = rca[u"data"].get(row[0], u"-")
2592             row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
2593         tbl_for_csv.append(row)
2594
2595     header_csv = [u"Test Case", ]
2596     for col in cols:
2597         header_csv.append(f"Avg({col[u'title']})")
2598         header_csv.append(f"Stdev({col[u'title']})")
2599     for comp in comparisons:
2600         header_csv.append(
2601             f"Avg({comp.get(u'title', u'')})"
2602         )
2603         header_csv.append(
2604             f"Stdev({comp.get(u'title', u'')})"
2605         )
2606     header_csv.extend([rca[u"title"] for rca in rcas])
2607
2608     legend_lst = table.get(u"legend", None)
2609     if legend_lst is None:
2610         legend = u""
2611     else:
2612         legend = u"\n" + u"\n".join(legend_lst) + u"\n"
2613
2614     footnote = u""
2615     for rca in rcas:
2616         footnote += f"\n{rca[u'title']}:\n"
2617         footnote += rca[u"data"].get(u"footnote", u"")
2618
2619     csv_file = f"{table[u'output-file']}-csv.csv"
2620     with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2621         file_handler.write(
2622             u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
2623         )
2624         for test in tbl_for_csv:
2625             file_handler.write(
2626                 u",".join([f'"{item}"' for item in test]) + u"\n"
2627             )
2628         if legend_lst:
2629             for item in legend_lst:
2630                 file_handler.write(f'"{item}"\n')
2631         if footnote:
2632             for itm in footnote.split(u"\n"):
2633                 file_handler.write(f'"{itm}"\n')
2634
2635     tbl_tmp = list()
2636     max_lens = [0, ] * len(tbl_cmp_lst[0])
2637     for line in tbl_cmp_lst:
2638         row = [line[0], ]
2639         for idx, itm in enumerate(line[1:]):
2640             if itm is None:
2641                 new_itm = u"NT"
2642             else:
2643                 if idx < len(cols):
2644                     new_itm = (
2645                         f"{round(float(itm[u'mean']) / 1e6, 1)} "
2646                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
2647                         replace(u"nan", u"NaN")
2648                     )
2649                 else:
2650                     new_itm = (
2651                         f"{round(float(itm[u'mean']) / 1e6, 1):+} "
2652                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
2653                         replace(u"nan", u"NaN")
2654                     )
2655             max_lens[idx] = \
2656                 max(max_lens[idx], len(new_itm.rsplit(u" ", 1)[-1]))
2657             row.append(new_itm)
2658
2659         tbl_tmp.append(row)
2660
2661     tbl_final = list()
2662     for line in tbl_tmp:
2663         row = [line[0], ]
2664         for idx, itm in enumerate(line[1:]):
2665             if itm in (u"NT", u"NaN"):
2666                 row.append(itm)
2667                 continue
2668             itm_lst = itm.rsplit(u"\u00B1", 1)
2669             itm_lst[-1] = \
2670                 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
2671             row.append(u"\u00B1".join(itm_lst))
2672         for rca in rcas:
2673             rca_nr = rca[u"data"].get(row[0], u"-")
2674             row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
2675
2676         tbl_final.append(row)
2677
2678     header = [u"Test Case", ]
2679     header.extend([col[u"title"] for col in cols])
2680     header.extend([comp.get(u"title", u"") for comp in comparisons])
2681     header.extend([rca[u"title"] for rca in rcas])
2682
2683     # Generate csv tables:
2684     csv_file = f"{table[u'output-file']}.csv"
2685     with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2686         file_handler.write(u";".join(header) + u"\n")
2687         for test in tbl_final:
2688             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2689
2690     # Generate txt table:
2691     txt_file_name = f"{table[u'output-file']}.txt"
2692     convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
2693
2694     with open(txt_file_name, u'a', encoding='utf-8') as txt_file:
2695         txt_file.write(legend)
2696         if footnote:
2697             txt_file.write(footnote)
2698         txt_file.write(u"\n:END")
2699
2700     # Generate html table:
2701     _tpc_generate_html_table(
2702         header,
2703         tbl_final,
2704         table[u'output-file'],
2705         legend=legend,
2706         footnote=footnote,
2707         sort_data=False,
2708         title=table.get(u"title", u"")
2709     )
2710
2711
2712 def table_weekly_comparison(table, in_data):
2713     """Generate the table(s) with algorithm: table_weekly_comparison
2714     specified in the specification file.
2715
2716     :param table: Table to generate.
2717     :param in_data: Data to process.
2718     :type table: pandas.Series
2719     :type in_data: InputData
2720     """
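         # Illustrative sketch of the specification entry this function
         # consumes (not copied from any real specification file; values are
         # placeholders and only the keys referenced directly in this
         # function are shown):
         #
         #   - type: "table"
         #     algorithm: "table_weekly_comparison"
         #     title: "<table title>"
         #     output-file: "<output path prefix>"
         #     include-tests: "NDR"  # or "PDR"
         #     nr-of-data-columns: <number of data columns, at least 2>
         #     testbeds: {<testbed ID from metadata>: <display name>, ...}
         #     comparisons:
         #     - reference: <column index>
         #       compare: <column index>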
2721     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2722
2723     # Transform the data
2724     logging.info(
2725         f"    Creating the data set for the {table.get(u'type', u'')} "
2726         f"{table.get(u'title', u'')}."
2727     )
2728
2729     incl_tests = table.get(u"include-tests", None)
2730     if incl_tests not in (u"NDR", u"PDR"):
2731         logging.error(f"Wrong tests to include specified ({incl_tests}).")
2732         return
2733
2734     nr_cols = table.get(u"nr-of-data-columns", None)
2735     if not nr_cols or nr_cols < 2:
2736         logging.error(
2737             f"No columns specified for {table.get(u'title', u'')}. Skipping."
2738         )
2739         return
2740
2741     data = in_data.filter_data(
2742         table,
2743         params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2744         continue_on_error=True
2745     )
2746
2747     header = [
2748         [u"Version"],
2749         [u"Date", ],
2750         [u"Build", ],
2751         [u"Testbed", ]
2752     ]
2753     tbl_dict = dict()
2754     idx = 0
2755     tb_tbl = table.get(u"testbeds", None)
2756     for job_name, job_data in data.items():
2757         for build_nr, build in job_data.items():
2758             if idx >= nr_cols:
2759                 break
2760             if build.empty:
2761                 continue
2762
2763             tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2764             if tb_ip and tb_tbl:
2765                 testbed = tb_tbl.get(tb_ip, u"")
2766             else:
2767                 testbed = u""
2768             header[2].insert(1, build_nr)
2769             header[3].insert(1, testbed)
2770             header[1].insert(
2771                 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2772             )
2773             header[0].insert(
2774                 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2775             )
2776
2777             for tst_name, tst_data in build.items():
2778                 tst_name_mod = \
2779                     _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2780                 if not tbl_dict.get(tst_name_mod, None):
2781                     tbl_dict[tst_name_mod] = dict(
2782                         name=tst_data[u'name'].rsplit(u'-', 1)[0],
2783                     )
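                     # Data columns are stored under negative keys: -1 for the
                     # first processed build, -2 for the next one, and so on.
                     # The same keys are used when the rows are assembled below.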
2784                 try:
2785                     tbl_dict[tst_name_mod][-idx - 1] = \
2786                         tst_data[u"throughput"][incl_tests][u"LOWER"]
2787                 except (TypeError, IndexError, KeyError, ValueError):
2788                     pass
2789             idx += 1
2790
2791     if idx < nr_cols:
2792         logging.error(u"Not enough data to build the table! Skipping")
2793         return
2794
2795     cmp_dict = dict()
2796     for idx, cmp in enumerate(table.get(u"comparisons", list())):
2797         idx_ref = cmp.get(u"reference", None)
2798         idx_cmp = cmp.get(u"compare", None)
2799         if idx_ref is None or idx_cmp is None:
2800             continue
2801         header[0].append(f"Diff{idx + 1}")
2802         header[1].append(header[0][idx_ref - idx - 1])
2803         header[2].append(u"vs")
2804         header[3].append(header[0][idx_cmp - idx - 1])
2805         for tst_name, tst_data in tbl_dict.items():
2806             if not cmp_dict.get(tst_name, None):
2807                 cmp_dict[tst_name] = list()
2808             ref_data = tst_data.get(idx_ref, None)
2809             cmp_data = tst_data.get(idx_cmp, None)
2810             if ref_data is None or cmp_data is None:
2811                 cmp_dict[tst_name].append(nan)
2812             else:
2813                 cmp_dict[tst_name].append(
2814                     relative_change(ref_data, cmp_data)
2815                 )
2816
2817     tbl_lst = list()
2818     for tst_name, tst_data in tbl_dict.items():
2819         itm_lst = [tst_data[u"name"], ]
2820         for idx in range(nr_cols):
2821             item = tst_data.get(-idx - 1, None)
2822             if item is None:
2823                 itm_lst.insert(1, None)
2824             else:
2825                 itm_lst.insert(1, round(item / 1e6, 1))
2826         itm_lst.extend(
2827             [
2828                 None if itm is None else round(itm, 1)
2829                 for itm in cmp_dict.get(tst_name, list())
2830             ]
2831         )
2832         tbl_lst.append(itm_lst)
2833
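         # Stable double sort: first by test name (ascending), then by the
         # last column (descending), so rows with equal values in the last
         # column keep the alphabetical order.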
2834     tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2835     tbl_lst.sort(key=lambda rel: rel[-1] if rel[-1] is not None else 0, reverse=True)
2836
2837     # Generate csv table:
2838     csv_file = f"{table[u'output-file']}.csv"
2839     with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2840         for hdr in header:
2841             file_handler.write(u",".join(hdr) + u"\n")
2842         for test in tbl_lst:
2843             file_handler.write(u",".join(
2844                 [
2845                     str(item).replace(u"None", u"-").replace(u"nan", u"-").
2846                     replace(u"null", u"-") for item in test
2847                 ]
2848             ) + u"\n")
2849
2850     txt_file = f"{table[u'output-file']}.txt"
2851     convert_csv_to_pretty_txt(csv_file, txt_file, delimiter=u",")
2852
2853     # Reorganize header in txt table
2854     txt_table = list()
2855     with open(txt_file, u"rt", encoding='utf-8') as file_handler:
2856         for line in file_handler:
2857             txt_table.append(line)
2858     try:
2859         txt_table.insert(5, txt_table.pop(2))
2860         with open(txt_file, u"wt", encoding='utf-8') as file_handler:
2861             file_handler.writelines(txt_table)
2862     except IndexError:
2863         pass
2864
2865     # Generate html table:
2866     hdr_html = [
2867         u"<br>".join(row) for row in zip(*header)
2868     ]
2869     _tpc_generate_html_table(
2870         hdr_html,
2871         tbl_lst,
2872         table[u'output-file'],
2873         sort_data=True,
2874         title=table.get(u"title", u""),
2875         generate_rst=False
2876     )