Report: Comparison html tables
csit.git: resources/tools/presentation/generator_tables.py
1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32 from yaml import load, FullLoader, YAMLError
33
34 from pal_utils import mean, stdev, classify_anomalies, \
35     convert_csv_to_pretty_txt, relative_change_stdev
36
37
38 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
39
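# Illustrative match, assuming a typical CSIT suite/test name such as the
# hypothetical "10ge2p1x710-ethip4-ip4base-ndrpdr":
#
#     >>> REGEX_NIC.search(u"10ge2p1x710-ethip4-ip4base-ndrpdr").group(0)
#     '10ge2p1x710'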
40
41 def generate_tables(spec, data):
42     """Generate all tables specified in the specification file.
43
44     :param spec: Specification read from the specification file.
45     :param data: Data to process.
46     :type spec: Specification
47     :type data: InputData
48     """
49
50     generator = {
51         u"table_merged_details": table_merged_details,
52         u"table_perf_comparison": table_perf_comparison,
53         u"table_perf_comparison_nic": table_perf_comparison_nic,
54         u"table_nics_comparison": table_nics_comparison,
55         u"table_soak_vs_ndr": table_soak_vs_ndr,
56         u"table_perf_trending_dash": table_perf_trending_dash,
57         u"table_perf_trending_dash_html": table_perf_trending_dash_html,
58         u"table_last_failed_tests": table_last_failed_tests,
59         u"table_failed_tests": table_failed_tests,
60         u"table_failed_tests_html": table_failed_tests_html,
61         u"table_oper_data_html": table_oper_data_html
62     }
63
64     logging.info(u"Generating the tables ...")
65     for table in spec.tables:
66         try:
67             generator[table[u"algorithm"]](table, data)
68         except (NameError, KeyError) as err:
69             logging.error(
70                 f"Probably algorithm {table[u'algorithm']} is not defined: "
71                 f"{repr(err)}"
72             )
73     logging.info(u"Done.")
74
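# Dispatch sketch: each item in spec.tables selects its generator through
# its u"algorithm" key. A hypothetical, simplified table specification
#
#     table = {
#         u"algorithm": u"table_perf_comparison",
#         u"title": u"Performance comparison",
#         u"output-file": u"_build/tables/perf-comparison"
#     }
#
# would be handled here by table_perf_comparison(table, data).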
75
76 def table_oper_data_html(table, input_data):
77     """Generate the table(s) with algorithm: table_oper_data_html
78     specified in the specification file.
79
80     :param table: Table to generate.
81     :param input_data: Data to process.
82     :type table: pandas.Series
83     :type input_data: InputData
84     """
85
86     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
87     # Transform the data
88     logging.info(
89         f"    Creating the data set for the {table.get(u'type', u'')} "
90         f"{table.get(u'title', u'')}."
91     )
92     data = input_data.filter_data(
93         table,
94         params=[u"name", u"parent", u"show-run", u"type"],
95         continue_on_error=True
96     )
97     if data.empty:
98         return
99     data = input_data.merge_data(data)
100
101     sort_tests = table.get(u"sort", None)
102     if sort_tests:
103         args = dict(
104             inplace=True,
105             ascending=(sort_tests == u"ascending")
106         )
107         data.sort_index(**args)
108
109     suites = input_data.filter_data(
110         table,
111         continue_on_error=True,
112         data_set=u"suites"
113     )
114     if suites.empty:
115         return
116     suites = input_data.merge_data(suites)
117
118     def _generate_html_table(tst_data):
119         """Generate an HTML table with operational data for the given test.
120
121         :param tst_data: Test data to be used to generate the table.
122         :type tst_data: pandas.Series
123         :returns: HTML table with operational data.
124         :rtype: str
125         """
126
127         colors = {
128             u"header": u"#7eade7",
129             u"empty": u"#ffffff",
130             u"body": (u"#e9f1fb", u"#d4e4f7")
131         }
132
133         tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
134
135         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
136         thead = ET.SubElement(
137             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
138         )
139         thead.text = tst_data[u"name"]
140
141         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
142         thead = ET.SubElement(
143             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
144         )
145         thead.text = u"\t"
146
147         if tst_data.get(u"show-run", u"No Data") == u"No Data":
148             trow = ET.SubElement(
149                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
150             )
151             tcol = ET.SubElement(
152                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
153             )
154             tcol.text = u"No Data"
155
156             trow = ET.SubElement(
157                 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
158             )
159             thead = ET.SubElement(
160                 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
161             )
162             font = ET.SubElement(
163                 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
164             )
165             font.text = u"."
166             return str(ET.tostring(tbl, encoding=u"unicode"))
167
168         tbl_hdr = (
169             u"Name",
170             u"Nr of Vectors",
171             u"Nr of Packets",
172             u"Suspends",
173             u"Cycles per Packet",
174             u"Average Vector Size"
175         )
176
177         for dut_data in tst_data[u"show-run"].values():
178             trow = ET.SubElement(
179                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
180             )
181             tcol = ET.SubElement(
182                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
183             )
184             if dut_data.get(u"threads", None) is None:
185                 tcol.text = u"No Data"
186                 continue
187
188             bold = ET.SubElement(tcol, u"b")
189             bold.text = (
190                 f"Host IP: {dut_data.get(u'host', '')}, "
191                 f"Socket: {dut_data.get(u'socket', '')}"
192             )
193             trow = ET.SubElement(
194                 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
195             )
196             thead = ET.SubElement(
197                 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
198             )
199             thead.text = u"\t"
200
201             for thread_nr, thread in dut_data[u"threads"].items():
202                 trow = ET.SubElement(
203                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
204                 )
205                 tcol = ET.SubElement(
206                     trow, u"td", attrib=dict(align=u"left", colspan=u"6")
207                 )
208                 bold = ET.SubElement(tcol, u"b")
209                 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
210                 trow = ET.SubElement(
211                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
212                 )
213                 for idx, col in enumerate(tbl_hdr):
214                     tcol = ET.SubElement(
215                         trow, u"td",
216                         attrib=dict(align=u"right" if idx else u"left")
217                     )
218                     font = ET.SubElement(
219                         tcol, u"font", attrib=dict(size=u"2")
220                     )
221                     bold = ET.SubElement(font, u"b")
222                     bold.text = col
223                 for row_nr, row in enumerate(thread):
224                     trow = ET.SubElement(
225                         tbl, u"tr",
226                         attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
227                     )
228                     for idx, col in enumerate(row):
229                         tcol = ET.SubElement(
230                             trow, u"td",
231                             attrib=dict(align=u"right" if idx else u"left")
232                         )
233                         font = ET.SubElement(
234                             tcol, u"font", attrib=dict(size=u"2")
235                         )
236                         if isinstance(col, float):
237                             font.text = f"{col:.2f}"
238                         else:
239                             font.text = str(col)
240                 trow = ET.SubElement(
241                     tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
242                 )
243                 thead = ET.SubElement(
244                     trow, u"th", attrib=dict(align=u"left", colspan=u"6")
245                 )
246                 thead.text = u"\t"
247
248         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
249         thead = ET.SubElement(
250             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
251         )
252         font = ET.SubElement(
253             thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
254         )
255         font.text = u"."
256
257         return str(ET.tostring(tbl, encoding=u"unicode"))
258
259     for suite in suites.values:
260         html_table = str()
261         for test_data in data.values:
262             if test_data[u"parent"] not in suite[u"name"]:
263                 continue
264             html_table += _generate_html_table(test_data)
265         if not html_table:
266             continue
267         try:
268             file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
269             with open(f"{file_name}", u'w') as html_file:
270                 logging.info(f"    Writing file: {file_name}")
271                 html_file.write(u".. raw:: html\n\n\t")
272                 html_file.write(html_table)
273                 html_file.write(u"\n\t<p><br><br></p>\n")
274         except KeyError:
275             logging.warning(u"The output file is not defined.")
276             return
277     logging.info(u"  Done.")
278
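# Output sketch: for every suite with matching tests one reStructuredText
# file "<output-file><suite-name>.rst" is written, wrapping the generated
# HTML in a raw directive, roughly (content abbreviated):
#
#     .. raw:: html
#
#         <table width="100%" border="0">...</table>
#         <p><br><br></p>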
279
280 def table_merged_details(table, input_data):
281     """Generate the table(s) with algorithm: table_merged_details
282     specified in the specification file.
283
284     :param table: Table to generate.
285     :param input_data: Data to process.
286     :type table: pandas.Series
287     :type input_data: InputData
288     """
289
290     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
291
292     # Transform the data
293     logging.info(
294         f"    Creating the data set for the {table.get(u'type', u'')} "
295         f"{table.get(u'title', u'')}."
296     )
297     data = input_data.filter_data(table, continue_on_error=True)
298     data = input_data.merge_data(data)
299
300     sort_tests = table.get(u"sort", None)
301     if sort_tests:
302         args = dict(
303             inplace=True,
304             ascending=(sort_tests == u"ascending")
305         )
306         data.sort_index(**args)
307
308     suites = input_data.filter_data(
309         table, continue_on_error=True, data_set=u"suites")
310     suites = input_data.merge_data(suites)
311
312     # Prepare the header of the tables
313     header = list()
314     for column in table[u"columns"]:
315         header.append(
316             u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
317         )
318
319     for suite in suites.values:
320         # Generate data
321         suite_name = suite[u"name"]
322         table_lst = list()
323         for test in data.keys():
324             if data[test][u"parent"] not in suite_name:
325                 continue
326             row_lst = list()
327             for column in table[u"columns"]:
328                 try:
329                     col_data = str(data[test][column[
330                         u"data"].split(u" ")[1]]).replace(u'"', u'""')
331                     # Do not include tests with "Test Failed" in test message
332                     if u"Test Failed" in col_data:
333                         continue
334                     col_data = col_data.replace(
335                         u"No Data", u"Not Captured     "
336                     )
337                     if column[u"data"].split(u" ")[1] in (u"name", ):
338                         if len(col_data) > 30:
339                             col_data_lst = col_data.split(u"-")
340                             half = int(len(col_data_lst) / 2)
341                             col_data = f"{u'-'.join(col_data_lst[:half])}" \
342                                        f"- |br| " \
343                                        f"{u'-'.join(col_data_lst[half:])}"
344                         col_data = f" |prein| {col_data} |preout| "
345                     elif column[u"data"].split(u" ")[1] in (u"msg", ):
346                         # Temporary solution: remove NDR results from message:
347                         if bool(table.get(u'remove-ndr', False)):
348                             try:
349                                 col_data = col_data.split(u" |br| ", 1)[1]
350                             except IndexError:
351                                 pass
352                         col_data = f" |prein| {col_data} |preout| "
353                     elif column[u"data"].split(u" ")[1] in \
354                             (u"conf-history", u"show-run"):
355                         col_data = col_data.replace(u" |br| ", u"", 1)
356                         col_data = f" |prein| {col_data[:-5]} |preout| "
357                     row_lst.append(f'"{col_data}"')
358                 except KeyError:
359                     row_lst.append(u'"Not captured"')
360             if len(row_lst) == len(table[u"columns"]):
361                 table_lst.append(row_lst)
362
363         # Write the data to file
364         if table_lst:
365             separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
366             file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
367             logging.info(f"      Writing file: {file_name}")
368             with open(file_name, u"wt") as file_handler:
369                 file_handler.write(u",".join(header) + u"\n")
370                 for item in table_lst:
371                     file_handler.write(u",".join(item) + u"\n")
372
373     logging.info(u"  Done.")
374
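# Output sketch: each suite yields "<output-file>_<suite-name>.csv" (no
# underscore when output-file ends with "/"), a comma-separated file with
# the configured column titles as a quoted header row followed by one
# quoted row per test that did not fail, e.g. (illustrative titles):
#
#     "Name","Documentation","Status"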
375
376 def _tpc_modify_test_name(test_name):
377     """Modify a test name by replacing or removing its parts.
378
379     :param test_name: Test name to be modified.
380     :type test_name: str
381     :returns: Modified test name.
382     :rtype: str
383     """
384     test_name_mod = test_name.\
385         replace(u"-ndrpdrdisc", u""). \
386         replace(u"-ndrpdr", u"").\
387         replace(u"-pdrdisc", u""). \
388         replace(u"-ndrdisc", u"").\
389         replace(u"-pdr", u""). \
390         replace(u"-ndr", u""). \
391         replace(u"1t1c", u"1c").\
392         replace(u"2t1c", u"1c"). \
393         replace(u"2t2c", u"2c").\
394         replace(u"4t2c", u"2c"). \
395         replace(u"4t4c", u"4c").\
396         replace(u"8t4c", u"4c")
397
398     return re.sub(REGEX_NIC, u"", test_name_mod)
399
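# Illustrative transformation, using a hypothetical test name:
#
#     >>> _tpc_modify_test_name(u"64b-2t1c-ethip4-ip4base-ndrpdr")
#     '64b-1c-ethip4-ip4base'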
400
401 def _tpc_modify_displayed_test_name(test_name):
402     """Modify a test name which is displayed in a table by replacing its parts.
403
404     :param test_name: Test name to be modified.
405     :type test_name: str
406     :returns: Modified test name.
407     :rtype: str
408     """
409     return test_name.\
410         replace(u"1t1c", u"1c").\
411         replace(u"2t1c", u"1c"). \
412         replace(u"2t2c", u"2c").\
413         replace(u"4t2c", u"2c"). \
414         replace(u"4t4c", u"4c").\
415         replace(u"8t4c", u"4c")
416
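# Illustrative transformation, using a hypothetical displayed name:
#
#     >>> _tpc_modify_displayed_test_name(u"ethip4-ip4base-4t2c")
#     'ethip4-ip4base-2c'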
417
418 def _tpc_insert_data(target, src, include_tests):
419     """Insert src data into the target structure.
420
421     :param target: Target structure where the data is placed.
422     :param src: Source data to be placed into the target structure.
423     :param include_tests: Which results will be included (MRR, NDR, PDR).
424     :type target: list
425     :type src: dict
426     :type include_tests: str
427     """
428     try:
429         if include_tests == u"MRR":
430             target.append(
431                 (
432                     src[u"result"][u"receive-rate"],
433                     src[u"result"][u"receive-stdev"]
434                 )
435             )
436         elif include_tests == u"PDR":
437             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
438         elif include_tests == u"NDR":
439             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
440     except (KeyError, TypeError):
441         pass
442
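# Illustrative use with a hypothetical src dictionary; missing keys are
# silently ignored, so the target list may stay empty:
#
#     >>> target = list()
#     >>> _tpc_insert_data(
#     ...     target, {u"throughput": {u"PDR": {u"LOWER": 12.3e6}}}, u"PDR"
#     ... )
#     >>> target
#     [12300000.0]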
443
444 def _tpc_sort_table(table):
445     """Sort the table this way:
446
447     1. Rows with "New in CSIT-XXXX" are collected but currently excluded.
448     2. Put rows with "See footnote" first.
449     3. Sort the remaining rows by "Delta", descending.
450
451     :param table: Table to sort.
452     :type table: list
453     :returns: Sorted table.
454     :rtype: list
455     """
456
457     tbl_new = list()
458     tbl_see = list()
459     tbl_delta = list()
460     for item in table:
461         if isinstance(item[-1], str):
462             if u"New in CSIT" in item[-1]:
463                 tbl_new.append(item)
464             elif u"See footnote" in item[-1]:
465                 tbl_see.append(item)
466         else:
467             tbl_delta.append(item)
468
469     # Sort the tables:
470     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
471     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
472     tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
473     tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
474     tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
475
476     # Put the tables together:
477     table = list()
478     # We do not want "New in CSIT":
479     # table.extend(tbl_new)
480     table.extend(tbl_see)
481     table.extend(tbl_delta)
482
483     return table
484
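# Illustrative sort, using hypothetical rows (the last column is a stdev or
# a remark string, the second to last is the delta used for ordering):
#
#     >>> rows = [
#     ...     [u"test-b", 10.0, 0.5, 11.0, 0.5, 10, 1],
#     ...     [u"test-a", 10.0, 0.5, 13.0, 0.5, 30, 1],
#     ...     [u"test-c", u"NT", u"NT", 9.0, 0.5, u"See footnote [1]"],
#     ... ]
#     >>> [row[0] for row in _tpc_sort_table(rows)]
#     ['test-c', 'test-a', 'test-b']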
485
486 def _tpc_generate_html_table(header, data, output_file_name, legend=u"",
487                              footnote=u""):
488     """Generate an html table from input data with simple sorting support.
489
490     :param header: Table header.
491     :param data: Input data to be included in the table. It is a list of lists.
492         Inner lists are rows in the table. All inner lists must be of the same
493         length. The length of these lists must be the same as the length of the
494         header.
495     :param output_file_name: The name (relative or full path) where the
496         generated html table is written.
497     :param legend: The legend to display below the table.
498     :param footnote: The footnote to display below the table (and legend).
499     :type header: list
500     :type data: list of lists
501     :type output_file_name: str
502     :type legend: str
503     :type footnote: str
504     """
505
506     try:
507         idx = header.index(u"Test Case")
508     except ValueError:
509         idx = 0
510     params = {
511         u"align-hdr": ([u"left", u"center"], [u"left", u"left", u"center"]),
512         u"align-itm": ([u"left", u"right"], [u"left", u"left", u"right"]),
513         u"width": ([28, 9], [4, 24, 10])
514     }
515
516     df_data = pd.DataFrame(data, columns=header)
517
518     df_sorted = [df_data.sort_values(
519         by=[key, header[idx]], ascending=[True, True]
520         if key != header[idx] else [False, True]) for key in header]
521     df_sorted_rev = [df_data.sort_values(
522         by=[key, header[idx]], ascending=[False, True]
523         if key != header[idx] else [True, True]) for key in header]
524     df_sorted.extend(df_sorted_rev)
525
526     fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
527                    for idx in range(len(df_data))]]
528     table_header = dict(
529         values=[f"<b>{item}</b>" for item in header],
530         fill_color=u"#7eade7",
531         align=params[u"align-hdr"][idx]
532     )
533
534     fig = go.Figure()
535
536     for table in df_sorted:
537         columns = [table.get(col) for col in header]
538         fig.add_trace(
539             go.Table(
540                 columnwidth=params[u"width"][idx],
541                 header=table_header,
542                 cells=dict(
543                     values=columns,
544                     fill_color=fill_color,
545                     align=params[u"align-itm"][idx]
546                 )
547             )
548         )
549
550     buttons = list()
551     menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
552     menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
553     menu_items.extend(menu_items_rev)
554     for idx, hdr in enumerate(menu_items):
555         visible = [False, ] * len(menu_items)
556         visible[idx] = True
557         buttons.append(
558             dict(
559                 label=hdr.replace(u" [Mpps]", u""),
560                 method=u"update",
561                 args=[{u"visible": visible}],
562             )
563         )
564
565     fig.update_layout(
566         updatemenus=[
567             go.layout.Updatemenu(
568                 type=u"dropdown",
569                 direction=u"down",
570                 x=0.03,
571                 xanchor=u"left",
572                 y=1.045,
573                 yanchor=u"top",
574                 active=len(menu_items) - 1,
575                 buttons=list(buttons)
576             )
577         ],
578         annotations=[
579             go.layout.Annotation(
580                 text=u"<b>Sort by:</b>",
581                 x=0,
582                 xref=u"paper",
583                 y=1.035,
584                 yref=u"paper",
585                 align=u"left",
586                 showarrow=False
587             )
588         ]
589     )
590
591     ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
592
593     # Add legend and footnote:
594     if not (legend or footnote):
595         return
596
597     with open(output_file_name, u"rt") as html_file:
598         html_text = html_file.read()
599     if html_text:
600         try:
601             idx = html_text.rindex(u"</div>")
602         except ValueError:
603             return
604         footnote = (legend + footnote).replace(u'\n', u'<br>')
605         html_text = (
606             html_text[:idx] +
607             f"<div>{footnote}</div>" +
608             html_text[idx:]
609         )
610         with open(output_file_name, u"wt") as html_file:
611             html_file.write(html_text)
612
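# Illustrative call with hypothetical data (header and every row must have
# the same length; the result is an offline plotly page with a "Sort by"
# drop-down menu):
#
#     _tpc_generate_html_table(
#         header=[u"Test Case", u"Delta [%]"],
#         data=[[u"ethip4-ip4base-1c", 2], [u"ethip4-ip4base-2c", -1]],
#         output_file_name=u"/tmp/example-comparison.html",
#         legend=u"Delta [%]: illustrative values only.\n"
#     )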
613
614 def table_perf_comparison(table, input_data):
615     """Generate the table(s) with algorithm: table_perf_comparison
616     specified in the specification file.
617
618     :param table: Table to generate.
619     :param input_data: Data to process.
620     :type table: pandas.Series
621     :type input_data: InputData
622     """
623
624     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
625
626     # Transform the data
627     logging.info(
628         f"    Creating the data set for the {table.get(u'type', u'')} "
629         f"{table.get(u'title', u'')}."
630     )
631     data = input_data.filter_data(table, continue_on_error=True)
632
633     # Prepare the header of the tables
634     try:
635         header = [u"Test Case", ]
636         legend = u"\nLegend:\n"
637
638         rca_data = None
639         rca = table.get(u"rca", None)
640         if rca:
641             try:
642                 with open(rca.get(u"data-file", ""), u"r") as rca_file:
643                     rca_data = load(rca_file, Loader=FullLoader)
644                 header.insert(0, rca.get(u"title", "RCA"))
645                 legend += (
646                     u"RCA: Reference to the Root Cause Analysis, see below.\n"
647                 )
648             except (YAMLError, IOError) as err:
649                 logging.warning(repr(err))
650
651         history = table.get(u"history", list())
652         for item in history:
653             header.extend(
654                 [
655                     f"{item[u'title']} Avg({table[u'include-tests']})",
656                     f"{item[u'title']} Stdev({table[u'include-tests']})"
657                 ]
658             )
659             legend += (
660                 f"{item[u'title']} Avg({table[u'include-tests']}): "
661                 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
662                 f"a series of runs of the listed tests executed against "
663                 f"rls{item[u'title']}.\n"
664                 f"{item[u'title']} Stdev({table[u'include-tests']}): "
665                 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
666                 f"computed from a series of runs of the listed tests executed "
667                 f"against rls{item[u'title']}.\n"
668             )
669         header.extend(
670             [
671                 f"{table[u'reference'][u'title']} "
672                 f"Avg({table[u'include-tests']})",
673                 f"{table[u'reference'][u'title']} "
674                 f"Stdev({table[u'include-tests']})",
675                 f"{table[u'compare'][u'title']} "
676                 f"Avg({table[u'include-tests']})",
677                 f"{table[u'compare'][u'title']} "
678                 f"Stdev({table[u'include-tests']})",
679                 f"Diff({table[u'reference'][u'title']},"
680                 f"{table[u'compare'][u'title']})",
681                 u"Stdev(Diff)"
682             ]
683         )
684         header_str = u";".join(header) + u"\n"
685         legend += (
686             f"{table[u'reference'][u'title']} "
687             f"Avg({table[u'include-tests']}): "
688             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
689             f"series of runs of the listed tests executed against "
690             f"rls{table[u'reference'][u'title']}.\n"
691             f"{table[u'reference'][u'title']} "
692             f"Stdev({table[u'include-tests']}): "
693             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
694             f"computed from a series of runs of the listed tests executed "
695             f"against rls{table[u'reference'][u'title']}.\n"
696             f"{table[u'compare'][u'title']} "
697             f"Avg({table[u'include-tests']}): "
698             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
699             f"series of runs of the listed tests executed against "
700             f"rls{table[u'compare'][u'title']}.\n"
701             f"{table[u'compare'][u'title']} "
702             f"Stdev({table[u'include-tests']}): "
703             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
704             f"computed from a series of runs of the listed tests executed "
705             f"against rls{table[u'compare'][u'title']}.\n"
706             f"Diff({table[u'reference'][u'title']},"
707             f"{table[u'compare'][u'title']}): "
708             f"Percentage change calculated for mean values.\n"
709             u"Stdev(Diff): "
710             u"Standard deviation of percentage change calculated for mean "
711             u"values.\n"
712             u"NT: Not Tested\n"
713         )
714     except (AttributeError, KeyError) as err:
715         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
716         return
717
718     # Prepare data to the table:
719     tbl_dict = dict()
720     for job, builds in table[u"reference"][u"data"].items():
721         for build in builds:
722             for tst_name, tst_data in data[job][str(build)].items():
723                 tst_name_mod = _tpc_modify_test_name(tst_name)
724                 if (u"across topologies" in table[u"title"].lower() or
725                         (u" 3n-" in table[u"title"].lower() and
726                          u" 2n-" in table[u"title"].lower())):
727                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
728                 if tbl_dict.get(tst_name_mod, None) is None:
729                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
730                     nic = groups.group(0) if groups else u""
731                     name = \
732                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
733                     if u"across testbeds" in table[u"title"].lower() or \
734                             u"across topologies" in table[u"title"].lower():
735                         name = _tpc_modify_displayed_test_name(name)
736                     tbl_dict[tst_name_mod] = {
737                         u"name": name,
738                         u"ref-data": list(),
739                         u"cmp-data": list()
740                     }
741                 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
742                                  src=tst_data,
743                                  include_tests=table[u"include-tests"])
744
745     replacement = table[u"reference"].get(u"data-replacement", None)
746     if replacement:
747         create_new_list = True
748         rpl_data = input_data.filter_data(
749             table, data=replacement, continue_on_error=True)
750         for job, builds in replacement.items():
751             for build in builds:
752                 for tst_name, tst_data in rpl_data[job][str(build)].items():
753                     tst_name_mod = _tpc_modify_test_name(tst_name)
754                     if (u"across topologies" in table[u"title"].lower() or
755                             (u" 3n-" in table[u"title"].lower() and
756                              u" 2n-" in table[u"title"].lower())):
757                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
758                     if tbl_dict.get(tst_name_mod, None) is None:
759                         name = \
760                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
761                         if u"across testbeds" in table[u"title"].lower() or \
762                                 u"across topologies" in table[u"title"].lower():
763                             name = _tpc_modify_displayed_test_name(name)
764                         tbl_dict[tst_name_mod] = {
765                             u"name": name,
766                             u"ref-data": list(),
767                             u"cmp-data": list()
768                         }
769                     if create_new_list:
770                         create_new_list = False
771                         tbl_dict[tst_name_mod][u"ref-data"] = list()
772
773                     _tpc_insert_data(
774                         target=tbl_dict[tst_name_mod][u"ref-data"],
775                         src=tst_data,
776                         include_tests=table[u"include-tests"]
777                     )
778
779     for job, builds in table[u"compare"][u"data"].items():
780         for build in builds:
781             for tst_name, tst_data in data[job][str(build)].items():
782                 tst_name_mod = _tpc_modify_test_name(tst_name)
783                 if (u"across topologies" in table[u"title"].lower() or
784                         (u" 3n-" in table[u"title"].lower() and
785                          u" 2n-" in table[u"title"].lower())):
786                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
787                 if tbl_dict.get(tst_name_mod, None) is None:
788                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
789                     nic = groups.group(0) if groups else u""
790                     name = \
791                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
792                     if u"across testbeds" in table[u"title"].lower() or \
793                             u"across topologies" in table[u"title"].lower():
794                         name = _tpc_modify_displayed_test_name(name)
795                     tbl_dict[tst_name_mod] = {
796                         u"name": name,
797                         u"ref-data": list(),
798                         u"cmp-data": list()
799                     }
800                 _tpc_insert_data(
801                     target=tbl_dict[tst_name_mod][u"cmp-data"],
802                     src=tst_data,
803                     include_tests=table[u"include-tests"]
804                 )
805
806     replacement = table[u"compare"].get(u"data-replacement", None)
807     if replacement:
808         create_new_list = True
809         rpl_data = input_data.filter_data(
810             table, data=replacement, continue_on_error=True)
811         for job, builds in replacement.items():
812             for build in builds:
813                 for tst_name, tst_data in rpl_data[job][str(build)].items():
814                     tst_name_mod = _tpc_modify_test_name(tst_name)
815                     if (u"across topologies" in table[u"title"].lower() or
816                             (u" 3n-" in table[u"title"].lower() and
817                              u" 2n-" in table[u"title"].lower())):
818                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
819                     if tbl_dict.get(tst_name_mod, None) is None:
820                         name = \
821                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
822                         if u"across testbeds" in table[u"title"].lower() or \
823                                 u"across topologies" in table[u"title"].lower():
824                             name = _tpc_modify_displayed_test_name(name)
825                         tbl_dict[tst_name_mod] = {
826                             u"name": name,
827                             u"ref-data": list(),
828                             u"cmp-data": list()
829                         }
830                     if create_new_list:
831                         create_new_list = False
832                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
833
834                     _tpc_insert_data(
835                         target=tbl_dict[tst_name_mod][u"cmp-data"],
836                         src=tst_data,
837                         include_tests=table[u"include-tests"]
838                     )
839
840     for item in history:
841         for job, builds in item[u"data"].items():
842             for build in builds:
843                 for tst_name, tst_data in data[job][str(build)].items():
844                     tst_name_mod = _tpc_modify_test_name(tst_name)
845                     if (u"across topologies" in table[u"title"].lower() or
846                             (u" 3n-" in table[u"title"].lower() and
847                              u" 2n-" in table[u"title"].lower())):
848                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
849                     if tbl_dict.get(tst_name_mod, None) is None:
850                         continue
851                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
852                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
853                     if tbl_dict[tst_name_mod][u"history"].\
854                             get(item[u"title"], None) is None:
855                         tbl_dict[tst_name_mod][u"history"][item[
856                             u"title"]] = list()
857                     try:
858                         if table[u"include-tests"] == u"MRR":
859                             res = (tst_data[u"result"][u"receive-rate"],
860                                    tst_data[u"result"][u"receive-stdev"])
861                         elif table[u"include-tests"] == u"PDR":
862                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
863                         elif table[u"include-tests"] == u"NDR":
864                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
865                         else:
866                             continue
867                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
868                             append(res)
869                     except (TypeError, KeyError):
870                         pass
871
872     tbl_lst = list()
873     for tst_name in tbl_dict:
874         item = [tbl_dict[tst_name][u"name"], ]
875         if history:
876             if tbl_dict[tst_name].get(u"history", None) is not None:
877                 for hist_data in tbl_dict[tst_name][u"history"].values():
878                     if hist_data:
879                         if table[u"include-tests"] == u"MRR":
880                             item.append(round(hist_data[0][0] / 1e6, 1))
881                             item.append(round(hist_data[0][1] / 1e6, 1))
882                         else:
883                             item.append(round(mean(hist_data) / 1e6, 1))
884                             item.append(round(stdev(hist_data) / 1e6, 1))
885                     else:
886                         item.extend([u"NT", u"NT"])
887             else:
888                 item.extend([u"NT", u"NT"])
889         data_r = tbl_dict[tst_name][u"ref-data"]
890         if data_r:
891             if table[u"include-tests"] == u"MRR":
892                 data_r_mean = data_r[0][0]
893                 data_r_stdev = data_r[0][1]
894             else:
895                 data_r_mean = mean(data_r)
896                 data_r_stdev = stdev(data_r)
897             item.append(round(data_r_mean / 1e6, 1))
898             item.append(round(data_r_stdev / 1e6, 1))
899         else:
900             data_r_mean = None
901             data_r_stdev = None
902             item.extend([u"NT", u"NT"])
903         data_c = tbl_dict[tst_name][u"cmp-data"]
904         if data_c:
905             if table[u"include-tests"] == u"MRR":
906                 data_c_mean = data_c[0][0]
907                 data_c_stdev = data_c[0][1]
908             else:
909                 data_c_mean = mean(data_c)
910                 data_c_stdev = stdev(data_c)
911             item.append(round(data_c_mean / 1e6, 1))
912             item.append(round(data_c_stdev / 1e6, 1))
913         else:
914             data_c_mean = None
915             data_c_stdev = None
916             item.extend([u"NT", u"NT"])
917         if item[-2] == u"NT":
918             pass
919         elif item[-4] == u"NT":
920             item.append(u"New in CSIT-2001")
921             item.append(u"New in CSIT-2001")
922         elif data_r_mean is not None and data_c_mean is not None:
923             delta, d_stdev = relative_change_stdev(
924                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
925             )
926             try:
927                 item.append(round(delta))
928             except ValueError:
929                 item.append(delta)
930             try:
931                 item.append(round(d_stdev))
932             except ValueError:
933                 item.append(d_stdev)
934         if rca_data:
935             rca_nr = rca_data.get(item[0], u"-")
936             item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
937         if (len(item) == len(header)) and (item[-4] != u"NT"):
938             tbl_lst.append(item)
939
940     tbl_lst = _tpc_sort_table(tbl_lst)
941
942     # Generate csv tables:
943     csv_file = f"{table[u'output-file']}.csv"
944     with open(csv_file, u"wt") as file_handler:
945         file_handler.write(header_str)
946         for test in tbl_lst:
947             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
948
949     txt_file_name = f"{table[u'output-file']}.txt"
950     convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
951
952     footnote = u""
953     with open(txt_file_name, u'a') as txt_file:
954         txt_file.write(legend)
955         if rca_data:
956             footnote = rca_data.get(u"footnote", u"")
957             if footnote:
958                 txt_file.write(footnote)
959         txt_file.write(u":END")
960
961     # Generate html table:
962     _tpc_generate_html_table(
963         header,
964         tbl_lst,
965         f"{table[u'output-file']}.html",
966         legend=legend,
967         footnote=footnote
968     )
969
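# Output sketch: with no history columns and no RCA column configured, each
# semicolon-separated CSV row written above has the form (hypothetical
# values, throughput in Mpps, delta in percent):
#
#     <test name>;12.4;0.2;13.1;0.3;6;4
#
# i.e. name; reference avg; reference stdev; compare avg; compare stdev;
# diff; stdev(diff).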
970
971 def table_perf_comparison_nic(table, input_data):
972     """Generate the table(s) with algorithm: table_perf_comparison_nic
973     specified in the specification file.
974
975     :param table: Table to generate.
976     :param input_data: Data to process.
977     :type table: pandas.Series
978     :type input_data: InputData
979     """
980
981     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
982
983     # Transform the data
984     logging.info(
985         f"    Creating the data set for the {table.get(u'type', u'')} "
986         f"{table.get(u'title', u'')}."
987     )
988     data = input_data.filter_data(table, continue_on_error=True)
989
990     # Prepare the header of the tables
991     try:
992         header = [u"Test Case", ]
993         legend = u"\nLegend:\n"
994
995         rca_data = None
996         rca = table.get(u"rca", None)
997         if rca:
998             try:
999                 with open(rca.get(u"data-file", ""), u"r") as rca_file:
1000                     rca_data = load(rca_file, Loader=FullLoader)
1001                 header.insert(0, rca.get(u"title", "RCA"))
1002                 legend += (
1003                     u"RCA: Reference to the Root Cause Analysis, see below.\n"
1004                 )
1005             except (YAMLError, IOError) as err:
1006                 logging.warning(repr(err))
1007
1008         history = table.get(u"history", list())
1009         for item in history:
1010             header.extend(
1011                 [
1012                     f"{item[u'title']} Avg({table[u'include-tests']})",
1013                     f"{item[u'title']} Stdev({table[u'include-tests']})"
1014                 ]
1015             )
1016             legend += (
1017                 f"{item[u'title']} Avg({table[u'include-tests']}): "
1018                 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
1019                 f"a series of runs of the listed tests executed against "
1020                 f"rls{item[u'title']}.\n"
1021                 f"{item[u'title']} Stdev({table[u'include-tests']}): "
1022                 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1023                 f"computed from a series of runs of the listed tests executed "
1024                 f"against rls{item[u'title']}.\n"
1025             )
1026         header.extend(
1027             [
1028                 f"{table[u'reference'][u'title']} "
1029                 f"Avg({table[u'include-tests']})",
1030                 f"{table[u'reference'][u'title']} "
1031                 f"Stdev({table[u'include-tests']})",
1032                 f"{table[u'compare'][u'title']} "
1033                 f"Avg({table[u'include-tests']})",
1034                 f"{table[u'compare'][u'title']} "
1035                 f"Stdev({table[u'include-tests']})",
1036                 f"Diff({table[u'reference'][u'title']},"
1037                 f"{table[u'compare'][u'title']})",
1038                 u"Stdev(Diff)"
1039             ]
1040         )
1041         header_str = u";".join(header) + u"\n"
1042         legend += (
1043             f"{table[u'reference'][u'title']} "
1044             f"Avg({table[u'include-tests']}): "
1045             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1046             f"series of runs of the listed tests executed against "
1047             f"rls{table[u'reference'][u'title']}.\n"
1048             f"{table[u'reference'][u'title']} "
1049             f"Stdev({table[u'include-tests']}): "
1050             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1051             f"computed from a series of runs of the listed tests executed "
1052             f"against rls{table[u'reference'][u'title']}.\n"
1053             f"{table[u'compare'][u'title']} "
1054             f"Avg({table[u'include-tests']}): "
1055             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1056             f"series of runs of the listed tests executed against "
1057             f"rls{table[u'compare'][u'title']}.\n"
1058             f"{table[u'compare'][u'title']} "
1059             f"Stdev({table[u'include-tests']}): "
1060             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1061             f"computed from a series of runs of the listed tests executed "
1062             f"against rls{table[u'compare'][u'title']}.\n"
1063             f"Diff({table[u'reference'][u'title']},"
1064             f"{table[u'compare'][u'title']}): "
1065             f"Percentage change calculated for mean values.\n"
1066             u"Stdev(Diff): "
1067             u"Standard deviation of percentage change calculated for mean "
1068             u"values.\n"
1069             u"NT: Not Tested\n"
1070         )
1071     except (AttributeError, KeyError) as err:
1072         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1073         return
1074
1075     # Prepare data to the table:
1076     tbl_dict = dict()
1077     for job, builds in table[u"reference"][u"data"].items():
1078         for build in builds:
1079             for tst_name, tst_data in data[job][str(build)].items():
1080                 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1081                     continue
1082                 tst_name_mod = _tpc_modify_test_name(tst_name)
1083                 if (u"across topologies" in table[u"title"].lower() or
1084                         (u" 3n-" in table[u"title"].lower() and
1085                          u" 2n-" in table[u"title"].lower())):
1086                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1087                 if tbl_dict.get(tst_name_mod, None) is None:
1088                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1089                     if u"across testbeds" in table[u"title"].lower() or \
1090                             u"across topologies" in table[u"title"].lower():
1091                         name = _tpc_modify_displayed_test_name(name)
1092                     tbl_dict[tst_name_mod] = {
1093                         u"name": name,
1094                         u"ref-data": list(),
1095                         u"cmp-data": list()
1096                     }
1097                 _tpc_insert_data(
1098                     target=tbl_dict[tst_name_mod][u"ref-data"],
1099                     src=tst_data,
1100                     include_tests=table[u"include-tests"]
1101                 )
1102
1103     replacement = table[u"reference"].get(u"data-replacement", None)
1104     if replacement:
1105         create_new_list = True
1106         rpl_data = input_data.filter_data(
1107             table, data=replacement, continue_on_error=True)
1108         for job, builds in replacement.items():
1109             for build in builds:
1110                 for tst_name, tst_data in rpl_data[job][str(build)].items():
1111                     if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1112                         continue
1113                     tst_name_mod = _tpc_modify_test_name(tst_name)
1114                     if (u"across topologies" in table[u"title"].lower() or
1115                             (u" 3n-" in table[u"title"].lower() and
1116                              u" 2n-" in table[u"title"].lower())):
1117                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1118                     if tbl_dict.get(tst_name_mod, None) is None:
1119                         name = \
1120                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1121                         if u"across testbeds" in table[u"title"].lower() or \
1122                                 u"across topologies" in table[u"title"].lower():
1123                             name = _tpc_modify_displayed_test_name(name)
1124                         tbl_dict[tst_name_mod] = {
1125                             u"name": name,
1126                             u"ref-data": list(),
1127                             u"cmp-data": list()
1128                         }
1129                     if create_new_list:
1130                         create_new_list = False
1131                         tbl_dict[tst_name_mod][u"ref-data"] = list()
1132
1133                     _tpc_insert_data(
1134                         target=tbl_dict[tst_name_mod][u"ref-data"],
1135                         src=tst_data,
1136                         include_tests=table[u"include-tests"]
1137                     )
1138
1139     for job, builds in table[u"compare"][u"data"].items():
1140         for build in builds:
1141             for tst_name, tst_data in data[job][str(build)].items():
1142                 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1143                     continue
1144                 tst_name_mod = _tpc_modify_test_name(tst_name)
1145                 if (u"across topologies" in table[u"title"].lower() or
1146                         (u" 3n-" in table[u"title"].lower() and
1147                          u" 2n-" in table[u"title"].lower())):
1148                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1149                 if tbl_dict.get(tst_name_mod, None) is None:
1150                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1151                     if u"across testbeds" in table[u"title"].lower() or \
1152                             u"across topologies" in table[u"title"].lower():
1153                         name = _tpc_modify_displayed_test_name(name)
1154                     tbl_dict[tst_name_mod] = {
1155                         u"name": name,
1156                         u"ref-data": list(),
1157                         u"cmp-data": list()
1158                     }
1159                 _tpc_insert_data(
1160                     target=tbl_dict[tst_name_mod][u"cmp-data"],
1161                     src=tst_data,
1162                     include_tests=table[u"include-tests"]
1163                 )
1164
1165     replacement = table[u"compare"].get(u"data-replacement", None)
1166     if replacement:
1167         create_new_list = True
1168         rpl_data = input_data.filter_data(
1169             table, data=replacement, continue_on_error=True)
1170         for job, builds in replacement.items():
1171             for build in builds:
1172                 for tst_name, tst_data in rpl_data[job][str(build)].items():
1173                     if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1174                         continue
1175                     tst_name_mod = _tpc_modify_test_name(tst_name)
1176                     if (u"across topologies" in table[u"title"].lower() or
1177                             (u" 3n-" in table[u"title"].lower() and
1178                              u" 2n-" in table[u"title"].lower())):
1179                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1180                     if tbl_dict.get(tst_name_mod, None) is None:
1181                         name = \
1182                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1183                         if u"across testbeds" in table[u"title"].lower() or \
1184                                 u"across topologies" in table[u"title"].lower():
1185                             name = _tpc_modify_displayed_test_name(name)
1186                         tbl_dict[tst_name_mod] = {
1187                             u"name": name,
1188                             u"ref-data": list(),
1189                             u"cmp-data": list()
1190                         }
1191                     if create_new_list:
1192                         create_new_list = False
1193                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
1194
1195                     _tpc_insert_data(
1196                         target=tbl_dict[tst_name_mod][u"cmp-data"],
1197                         src=tst_data,
1198                         include_tests=table[u"include-tests"]
1199                     )
1200
1201     for item in history:
1202         for job, builds in item[u"data"].items():
1203             for build in builds:
1204                 for tst_name, tst_data in data[job][str(build)].items():
1205                     if item[u"nic"] not in tst_data[u"tags"]:
1206                         continue
1207                     tst_name_mod = _tpc_modify_test_name(tst_name)
1208                     if (u"across topologies" in table[u"title"].lower() or
1209                             (u" 3n-" in table[u"title"].lower() and
1210                              u" 2n-" in table[u"title"].lower())):
1211                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1212                     if tbl_dict.get(tst_name_mod, None) is None:
1213                         continue
1214                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
1215                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1216                     if tbl_dict[tst_name_mod][u"history"].\
1217                             get(item[u"title"], None) is None:
1218                         tbl_dict[tst_name_mod][u"history"][item[
1219                             u"title"]] = list()
1220                     try:
1221                         if table[u"include-tests"] == u"MRR":
1222                             res = (tst_data[u"result"][u"receive-rate"],
1223                                    tst_data[u"result"][u"receive-stdev"])
1224                         elif table[u"include-tests"] == u"PDR":
1225                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1226                         elif table[u"include-tests"] == u"NDR":
1227                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1228                         else:
1229                             continue
1230                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1231                             append(res)
1232                     except (TypeError, KeyError):
1233                         pass
1234
1235     tbl_lst = list()
1236     for tst_name in tbl_dict:
1237         item = [tbl_dict[tst_name][u"name"], ]
1238         if history:
1239             if tbl_dict[tst_name].get(u"history", None) is not None:
1240                 for hist_data in tbl_dict[tst_name][u"history"].values():
1241                     if hist_data:
1242                         if table[u"include-tests"] == u"MRR":
1243                             item.append(round(hist_data[0][0] / 1e6, 1))
1244                             item.append(round(hist_data[0][1] / 1e6, 1))
1245                         else:
1246                             item.append(round(mean(hist_data) / 1e6, 1))
1247                             item.append(round(stdev(hist_data) / 1e6, 1))
1248                     else:
1249                         item.extend([u"NT", u"NT"])
1250             else:
1251                 item.extend([u"NT", u"NT"])
1252         data_r = tbl_dict[tst_name][u"ref-data"]
1253         if data_r:
1254             if table[u"include-tests"] == u"MRR":
1255                 data_r_mean = data_r[0][0]
1256                 data_r_stdev = data_r[0][1]
1257             else:
1258                 data_r_mean = mean(data_r)
1259                 data_r_stdev = stdev(data_r)
1260             item.append(round(data_r_mean / 1e6, 1))
1261             item.append(round(data_r_stdev / 1e6, 1))
1262         else:
1263             data_r_mean = None
1264             data_r_stdev = None
1265             item.extend([u"NT", u"NT"])
1266         data_c = tbl_dict[tst_name][u"cmp-data"]
1267         if data_c:
1268             if table[u"include-tests"] == u"MRR":
1269                 data_c_mean = data_c[0][0]
1270                 data_c_stdev = data_c[0][1]
1271             else:
1272                 data_c_mean = mean(data_c)
1273                 data_c_stdev = stdev(data_c)
1274             item.append(round(data_c_mean / 1e6, 1))
1275             item.append(round(data_c_stdev / 1e6, 1))
1276         else:
1277             data_c_mean = None
1278             data_c_stdev = None
1279             item.extend([u"NT", u"NT"])
1280         if item[-2] == u"NT":
1281             pass
1282         elif item[-4] == u"NT":
1283             item.append(u"New in CSIT-2001")
1284             item.append(u"New in CSIT-2001")
1285         elif data_r_mean is not None and data_c_mean is not None:
1286             delta, d_stdev = relative_change_stdev(
1287                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1288             )
1289             try:
1290                 item.append(round(delta))
1291             except ValueError:
1292                 item.append(delta)
1293             try:
1294                 item.append(round(d_stdev))
1295             except ValueError:
1296                 item.append(d_stdev)
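        # Rows with compare data but no reference data are flagged
        # u"New in CSIT-2001"; rows with no compare data (item[-2] == u"NT")
        # get no Diff columns and are therefore dropped by the length check
        # below.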
1297         if rca_data:
1298             rca_nr = rca_data.get(item[0], u"-")
1299             item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1300         if (len(item) == len(header)) and (item[-4] != u"NT"):
1301             tbl_lst.append(item)
1302
1303     tbl_lst = _tpc_sort_table(tbl_lst)
1304
1305     # Generate csv tables:
1306     csv_file = f"{table[u'output-file']}.csv"
1307     with open(csv_file, u"wt") as file_handler:
1308         file_handler.write(header_str)
1309         for test in tbl_lst:
1310             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1311
1312     txt_file_name = f"{table[u'output-file']}.txt"
1313     convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
1314
1315     footnote = u""
1316     with open(txt_file_name, u'a') as txt_file:
1317         txt_file.write(legend)
1318         if rca_data:
1319             footnote = rca_data.get(u"footnote", u"")
1320             if footnote:
1321                 txt_file.write(footnote)
1322         txt_file.write(u":END")
1323
1324     # Generate html table:
1325     _tpc_generate_html_table(
1326         header,
1327         tbl_lst,
1328         f"{table[u'output-file']}.html",
1329         legend=legend,
1330         footnote=footnote
1331     )
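
# Illustrative sketch only: the Diff and Stdev(Diff) columns above are
# produced by relative_change_stdev imported from pal_utils, whose
# implementation is not shown in this file. The helper below is an
# assumption of the kind of arithmetic involved (percentage change of the
# means with first-order error propagation of the stdevs); it is not used
# by the generators and the real helper may differ.
def _example_relative_change_sketch(mean_ref, mean_cmp, std_ref, std_cmp):
    """Sketch: percentage change of two means and a propagated stdev."""
    delta = (mean_cmp - mean_ref) / mean_ref * 100.0
    stdev_delta = 100.0 * (
        (std_cmp / mean_ref) ** 2 +
        (std_ref * mean_cmp / mean_ref ** 2) ** 2
    ) ** 0.5
    return round(delta, 2), round(stdev_delta, 2)

# Example: _example_relative_change_sketch(12.3e6, 13.1e6, 0.2e6, 0.3e6)
# returns roughly (6.5, 3.0), i.e. a +6.5 % change with a ~3 % stdev.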
1332
1333
1334 def table_nics_comparison(table, input_data):
1335     """Generate the table(s) with algorithm: table_nics_comparison
1336     specified in the specification file.
1337
1338     :param table: Table to generate.
1339     :param input_data: Data to process.
1340     :type table: pandas.Series
1341     :type input_data: InputData
1342     """
1343
1344     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1345
1346     # Transform the data
1347     logging.info(
1348         f"    Creating the data set for the {table.get(u'type', u'')} "
1349         f"{table.get(u'title', u'')}."
1350     )
1351     data = input_data.filter_data(table, continue_on_error=True)
1352
1353     # Prepare the header of the tables
1354     try:
1355         header = [
1356             u"Test Case",
1357             f"{table[u'reference'][u'title']} "
1358             f"Avg({table[u'include-tests']})",
1359             f"{table[u'reference'][u'title']} "
1360             f"Stdev({table[u'include-tests']})",
1361             f"{table[u'compare'][u'title']} "
1362             f"Avg({table[u'include-tests']})",
1363             f"{table[u'compare'][u'title']} "
1364             f"Stdev({table[u'include-tests']})",
1365             f"Diff({table[u'reference'][u'title']},"
1366             f"{table[u'compare'][u'title']})",
1367             u"Stdev(Diff)"
1368         ]
1369         legend = (
1370             u"\nLegend:\n"
1371             f"{table[u'reference'][u'title']} "
1372             f"Avg({table[u'include-tests']}): "
1373             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1374             f"series of runs of the listed tests executed using "
1375             f"{table[u'reference'][u'title']} NIC.\n"
1376             f"{table[u'reference'][u'title']} "
1377             f"Stdev({table[u'include-tests']}): "
1378             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1379             f"computed from a series of runs of the listed tests executed "
1380             f"using {table[u'reference'][u'title']} NIC.\n"
1381             f"{table[u'compare'][u'title']} "
1382             f"Avg({table[u'include-tests']}): "
1383             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1384             f"series of runs of the listed tests executed using "
1385             f"{table[u'compare'][u'title']} NIC.\n"
1386             f"{table[u'compare'][u'title']} "
1387             f"Stdev({table[u'include-tests']}): "
1388             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1389             f"computed from a series of runs of the listed tests executed "
1390             f"using {table[u'compare'][u'title']} NIC.\n"
1391             f"Diff({table[u'reference'][u'title']},"
1392             f"{table[u'compare'][u'title']}): "
1393             f"Percentage change calculated for mean values.\n"
1394             u"Stdev(Diff): "
1395             u"Standard deviation of percentage change calculated for mean "
1396             u"values.\n"
1397             u":END"
1398         )
1399
1400     except (AttributeError, KeyError) as err:
1401         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1402         return
1403
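    # For illustration (hypothetical specification values): with
    #   table[u"reference"][u"title"] == u"x710",
    #   table[u"compare"][u"title"] == u"xxv710",
    #   table[u"include-tests"] == u"NDR",
    # the header built above becomes:
    #   ["Test Case", "x710 Avg(NDR)", "x710 Stdev(NDR)",
    #    "xxv710 Avg(NDR)", "xxv710 Stdev(NDR)",
    #    "Diff(x710,xxv710)", "Stdev(Diff)"]
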
1404     # Prepare data for the table:
1405     tbl_dict = dict()
1406     for job, builds in table[u"data"].items():
1407         for build in builds:
1408             for tst_name, tst_data in data[job][str(build)].items():
1409                 tst_name_mod = _tpc_modify_test_name(tst_name)
1410                 if tbl_dict.get(tst_name_mod, None) is None:
1411                     name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
1412                     tbl_dict[tst_name_mod] = {
1413                         u"name": name,
1414                         u"ref-data": list(),
1415                         u"cmp-data": list()
1416                     }
1417                 try:
1418                     if table[u"include-tests"] == u"MRR":
1419                         result = (tst_data[u"result"][u"receive-rate"],
1420                                   tst_data[u"result"][u"receive-stdev"])
1421                     elif table[u"include-tests"] == u"PDR":
1422                         result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1423                     elif table[u"include-tests"] == u"NDR":
1424                         result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1425                     else:
1426                         continue
1427
1428                     if result and \
1429                             table[u"reference"][u"nic"] in tst_data[u"tags"]:
1430                         tbl_dict[tst_name_mod][u"ref-data"].append(result)
1431                     elif result and \
1432                             table[u"compare"][u"nic"] in tst_data[u"tags"]:
1433                         tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1434                 except (TypeError, KeyError) as err:
1435                     logging.debug(f"No data for {tst_name}\n{repr(err)}")
1436                     # No data in output.xml for this test
1437
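    # Note on the collected values: for MRR runs a (receive-rate,
    # receive-stdev) tuple is stored per build, while for NDR and PDR runs
    # a single LOWER throughput value is stored; the aggregation below
    # therefore branches on table[u"include-tests"] again.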
1438     tbl_lst = list()
1439     for tst_name in tbl_dict:
1440         item = [tbl_dict[tst_name][u"name"], ]
1441         data_r = tbl_dict[tst_name][u"ref-data"]
1442         if data_r:
1443             if table[u"include-tests"] == u"MRR":
1444                 data_r_mean = data_r[0][0]
1445                 data_r_stdev = data_r[0][1]
1446             else:
1447                 data_r_mean = mean(data_r)
1448                 data_r_stdev = stdev(data_r)
1449             item.append(round(data_r_mean / 1e6, 1))
1450             item.append(round(data_r_stdev / 1e6, 1))
1451         else:
1452             data_r_mean = None
1453             data_r_stdev = None
1454             item.extend([None, None])
1455         data_c = tbl_dict[tst_name][u"cmp-data"]
1456         if data_c:
1457             if table[u"include-tests"] == u"MRR":
1458                 data_c_mean = data_c[0][0]
1459                 data_c_stdev = data_c[0][1]
1460             else:
1461                 data_c_mean = mean(data_c)
1462                 data_c_stdev = stdev(data_c)
1463             item.append(round(data_c_mean / 1e6, 1))
1464             item.append(round(data_c_stdev / 1e6, 1))
1465         else:
1466             data_c_mean = None
1467             data_c_stdev = None
1468             item.extend([None, None])
1469         if data_r_mean is not None and data_c_mean is not None:
1470             delta, d_stdev = relative_change_stdev(
1471                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1472             )
1473             try:
1474                 item.append(round(delta))
1475             except ValueError:
1476                 item.append(delta)
1477             try:
1478                 item.append(round(d_stdev))
1479             except ValueError:
1480                 item.append(d_stdev)
1481             tbl_lst.append(item)
1482
1483     # Sort the table according to the relative change
1484     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1485
1486     # Generate csv tables:
1487     with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1488         file_handler.write(u";".join(header) + u"\n")
1489         for test in tbl_lst:
1490             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1491
1492     convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1493                               f"{table[u'output-file']}.txt",
1494                               delimiter=u";")
1495
1496     with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1497         txt_file.write(legend)
1498
1499     # Generate html table:
1500     _tpc_generate_html_table(
1501         header,
1502         tbl_lst,
1503         f"{table[u'output-file']}.html",
1504         legend=legend
1505     )
1506
1507
1508 def table_soak_vs_ndr(table, input_data):
1509     """Generate the table(s) with algorithm: table_soak_vs_ndr
1510     specified in the specification file.
1511
1512     :param table: Table to generate.
1513     :param input_data: Data to process.
1514     :type table: pandas.Series
1515     :type input_data: InputData
1516     """
1517
1518     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1519
1520     # Transform the data
1521     logging.info(
1522         f"    Creating the data set for the {table.get(u'type', u'')} "
1523         f"{table.get(u'title', u'')}."
1524     )
1525     data = input_data.filter_data(table, continue_on_error=True)
1526
1527     # Prepare the header of the table
1528     try:
1529         header = [
1530             u"Test Case",
1531             f"Avg({table[u'reference'][u'title']})",
1532             f"Stdev({table[u'reference'][u'title']})",
1533             f"Avg({table[u'compare'][u'title']})",
1534             f"Stdev({table[u'compare'][u'title']})",
1535             u"Diff",
1536             u"Stdev(Diff)"
1537         ]
1538         header_str = u";".join(header) + u"\n"
1539         legend = (
1540             u"\nLegend:\n"
1541             f"Avg({table[u'reference'][u'title']}): "
1542             f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
1543             f"from a series of runs of the listed tests.\n"
1544             f"Stdev({table[u'reference'][u'title']}): "
1545             f"Standard deviation value of {table[u'reference'][u'title']} "
1546             f"[Mpps] computed from a series of runs of the listed tests.\n"
1547             f"Avg({table[u'compare'][u'title']}): "
1548             f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
1549             f"a series of runs of the listed tests.\n"
1550             f"Stdev({table[u'compare'][u'title']}): "
1551             f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
1552             f"computed from a series of runs of the listed tests.\n"
1553             f"Diff({table[u'reference'][u'title']},"
1554             f"{table[u'compare'][u'title']}): "
1555             f"Percentage change calculated for mean values.\n"
1556             u"Stdev(Diff): "
1557             u"Standard deviation of percentage change calculated for mean "
1558             u"values.\n"
1559             u":END"
1560         )
1561     except (AttributeError, KeyError) as err:
1562         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1563         return
1564
1565     # Create a list of available SOAK test results:
1566     tbl_dict = dict()
1567     for job, builds in table[u"compare"][u"data"].items():
1568         for build in builds:
1569             for tst_name, tst_data in data[job][str(build)].items():
1570                 if tst_data[u"type"] == u"SOAK":
1571                     tst_name_mod = tst_name.replace(u"-soak", u"")
1572                     if tbl_dict.get(tst_name_mod, None) is None:
1573                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
1574                         nic = groups.group(0) if groups else u""
1575                         name = (
1576                             f"{nic}-"
1577                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1578                         )
1579                         tbl_dict[tst_name_mod] = {
1580                             u"name": name,
1581                             u"ref-data": list(),
1582                             u"cmp-data": list()
1583                         }
1584                     try:
1585                         tbl_dict[tst_name_mod][u"cmp-data"].append(
1586                             tst_data[u"throughput"][u"LOWER"])
1587                     except (KeyError, TypeError):
1588                         pass
1589     tests_lst = tbl_dict.keys()
1590
1591     # Add corresponding NDR test results:
1592     for job, builds in table[u"reference"][u"data"].items():
1593         for build in builds:
1594             for tst_name, tst_data in data[job][str(build)].items():
1595                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1596                     replace(u"-mrr", u"")
1597                 if tst_name_mod not in tests_lst:
1598                     continue
1599                 try:
1600                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1601                         continue
1602                     if table[u"include-tests"] == u"MRR":
1603                         result = (tst_data[u"result"][u"receive-rate"],
1604                                   tst_data[u"result"][u"receive-stdev"])
1605                     elif table[u"include-tests"] == u"PDR":
1606                         result = \
1607                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
1608                     elif table[u"include-tests"] == u"NDR":
1609                         result = \
1610                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
1611                     else:
1612                         result = None
1613                     if result is not None:
1614                         tbl_dict[tst_name_mod][u"ref-data"].append(
1615                             result)
1616                 except (KeyError, TypeError):
1617                     continue
1618
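    # For illustration (hypothetical test names): a SOAK result named
    # "64b-2t1c-ethip4-ip4base-soak" is stored under the key
    # "64b-2t1c-ethip4-ip4base"; the NDR result
    # "64b-2t1c-ethip4-ip4base-ndrpdr" maps to the same key once the
    # "-ndrpdr" suffix is stripped, so both series end up in one row.
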
1619     tbl_lst = list()
1620     for tst_name in tbl_dict:
1621         item = [tbl_dict[tst_name][u"name"], ]
1622         data_r = tbl_dict[tst_name][u"ref-data"]
1623         if data_r:
1624             if table[u"include-tests"] == u"MRR":
1625                 data_r_mean = data_r[0][0]
1626                 data_r_stdev = data_r[0][1]
1627             else:
1628                 data_r_mean = mean(data_r)
1629                 data_r_stdev = stdev(data_r)
1630             item.append(round(data_r_mean / 1e6, 1))
1631             item.append(round(data_r_stdev / 1e6, 1))
1632         else:
1633             data_r_mean = None
1634             data_r_stdev = None
1635             item.extend([None, None])
1636         data_c = tbl_dict[tst_name][u"cmp-data"]
1637         if data_c:
1638             if table[u"include-tests"] == u"MRR":
1639                 data_c_mean = data_c[0][0]
1640                 data_c_stdev = data_c[0][1]
1641             else:
1642                 data_c_mean = mean(data_c)
1643                 data_c_stdev = stdev(data_c)
1644             item.append(round(data_c_mean / 1e6, 1))
1645             item.append(round(data_c_stdev / 1e6, 1))
1646         else:
1647             data_c_mean = None
1648             data_c_stdev = None
1649             item.extend([None, None])
1650         if data_r_mean is not None and data_c_mean is not None:
1651             delta, d_stdev = relative_change_stdev(
1652                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1653             try:
1654                 item.append(round(delta))
1655             except ValueError:
1656                 item.append(delta)
1657             try:
1658                 item.append(round(d_stdev))
1659             except ValueError:
1660                 item.append(d_stdev)
1661             tbl_lst.append(item)
1662
1663     # Sort the table according to the relative change
1664     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1665
1666     # Generate csv tables:
1667     csv_file = f"{table[u'output-file']}.csv"
1668     with open(csv_file, u"wt") as file_handler:
1669         file_handler.write(header_str)
1670         for test in tbl_lst:
1671             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1672
1673     convert_csv_to_pretty_txt(
1674         csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
1675     )
1676     with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1677         txt_file.write(legend)
1678
1679     # Generate html table:
1680     _tpc_generate_html_table(
1681         header,
1682         tbl_lst,
1683         f"{table[u'output-file']}.html",
1684         legend=legend
1685     )
1686
1687
1688 def table_perf_trending_dash(table, input_data):
1689     """Generate the table(s) with algorithm:
1690     table_perf_trending_dash
1691     specified in the specification file.
1692
1693     :param table: Table to generate.
1694     :param input_data: Data to process.
1695     :type table: pandas.Series
1696     :type input_data: InputData
1697     """
1698
1699     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1700
1701     # Transform the data
1702     logging.info(
1703         f"    Creating the data set for the {table.get(u'type', u'')} "
1704         f"{table.get(u'title', u'')}."
1705     )
1706     data = input_data.filter_data(table, continue_on_error=True)
1707
1708     # Prepare the header of the tables
1709     header = [
1710         u"Test Case",
1711         u"Trend [Mpps]",
1712         u"Short-Term Change [%]",
1713         u"Long-Term Change [%]",
1714         u"Regressions [#]",
1715         u"Progressions [#]"
1716     ]
1717     header_str = u",".join(header) + u"\n"
1718
1719     # Prepare data for the table:
1720     tbl_dict = dict()
1721     for job, builds in table[u"data"].items():
1722         for build in builds:
1723             for tst_name, tst_data in data[job][str(build)].items():
1724                 if tst_name.lower() in table.get(u"ignore-list", list()):
1725                     continue
1726                 if tbl_dict.get(tst_name, None) is None:
1727                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1728                     if not groups:
1729                         continue
1730                     nic = groups.group(0)
1731                     tbl_dict[tst_name] = {
1732                         u"name": f"{nic}-{tst_data[u'name']}",
1733                         u"data": OrderedDict()
1734                     }
1735                 try:
1736                     tbl_dict[tst_name][u"data"][str(build)] = \
1737                         tst_data[u"result"][u"receive-rate"]
1738                 except (TypeError, KeyError):
1739                     pass  # No data in output.xml for this test
1740
1741     tbl_lst = list()
1742     for tst_name in tbl_dict:
1743         data_t = tbl_dict[tst_name][u"data"]
1744         if len(data_t) < 2:
1745             continue
1746
1747         classification_lst, avgs = classify_anomalies(data_t)
1748
1749         win_size = min(len(data_t), table[u"window"])
1750         long_win_size = min(len(data_t), table[u"long-trend-window"])
1751
1752         try:
1753             max_long_avg = max(
1754                 [x for x in avgs[-long_win_size:-win_size]
1755                  if not isnan(x)])
1756         except ValueError:
1757             max_long_avg = nan
1758         last_avg = avgs[-1]
1759         avg_week_ago = avgs[max(-win_size, -len(avgs))]
1760
1761         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1762             rel_change_last = nan
1763         else:
1764             rel_change_last = round(
1765                 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1766
1767         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1768             rel_change_long = nan
1769         else:
1770             rel_change_long = round(
1771                 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
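        # For illustration (hypothetical values): with avg_week_ago equal
        # to 10.0e6 and last_avg equal to 9.5e6, the short-term change is
        # round(((9.5e6 - 10.0e6) / 10.0e6) * 100, 2) == -5.0 [%]; the
        # long-term change uses the maximum of the older trend averages
        # (max_long_avg) instead of avg_week_ago.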
1772
1773         if classification_lst:
1774             if isnan(rel_change_last) and isnan(rel_change_long):
1775                 continue
1776             if isnan(last_avg) or isnan(rel_change_last) or \
1777                     isnan(rel_change_long):
1778                 continue
1779             tbl_lst.append(
1780                 [tbl_dict[tst_name][u"name"],
1781                  round(last_avg / 1e6, 2),
1782                  rel_change_last,
1783                  rel_change_long,
1784                  classification_lst[-win_size:].count(u"regression"),
1785                  classification_lst[-win_size:].count(u"progression")])
1786
1787     tbl_lst.sort(key=lambda rel: rel[0])
1788
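    # Order the dashboard: primary key is the number of regressions
    # (descending), secondary key the number of progressions (descending),
    # tertiary key the short-term change (ascending).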
1789     tbl_sorted = list()
1790     for nrr in range(table[u"window"], -1, -1):
1791         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1792         for nrp in range(table[u"window"], -1, -1):
1793             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1794             tbl_out.sort(key=lambda rel: rel[2])
1795             tbl_sorted.extend(tbl_out)
1796
1797     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1798
1799     logging.info(f"    Writing file: {file_name}")
1800     with open(file_name, u"wt") as file_handler:
1801         file_handler.write(header_str)
1802         for test in tbl_sorted:
1803             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1804
1805     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1806     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1807
1808
1809 def _generate_url(testbed, test_name):
1810     """Generate URL to a trending plot from the name of the test case.
1811
1812     :param testbed: The testbed used for testing.
1813     :param test_name: The name of the test case.
1814     :type testbed: str
1815     :type test_name: str
1816     :returns: The URL to the plot with the trending data for the given test
1817         case.
1818     :rtype: str
1819     """
1820
1821     if u"x520" in test_name:
1822         nic = u"x520"
1823     elif u"x710" in test_name:
1824         nic = u"x710"
1825     elif u"xl710" in test_name:
1826         nic = u"xl710"
1827     elif u"xxv710" in test_name:
1828         nic = u"xxv710"
1829     elif u"vic1227" in test_name:
1830         nic = u"vic1227"
1831     elif u"vic1385" in test_name:
1832         nic = u"vic1385"
1833     elif u"x553" in test_name:
1834         nic = u"x553"
1835     elif u"cx556" in test_name or u"cx556a" in test_name:
1836         nic = u"cx556a"
1837     else:
1838         nic = u""
1839
1840     if u"64b" in test_name:
1841         frame_size = u"64b"
1842     elif u"78b" in test_name:
1843         frame_size = u"78b"
1844     elif u"imix" in test_name:
1845         frame_size = u"imix"
1846     elif u"9000b" in test_name:
1847         frame_size = u"9000b"
1848     elif u"1518b" in test_name:
1849         frame_size = u"1518b"
1850     elif u"114b" in test_name:
1851         frame_size = u"114b"
1852     else:
1853         frame_size = u""
1854
1855     if u"1t1c" in test_name or \
1856         (u"-1c-" in test_name and
1857          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1858         cores = u"1t1c"
1859     elif u"2t2c" in test_name or \
1860          (u"-2c-" in test_name and
1861           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1862         cores = u"2t2c"
1863     elif u"4t4c" in test_name or \
1864          (u"-4c-" in test_name and
1865           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1866         cores = u"4t4c"
1867     elif u"2t1c" in test_name or \
1868          (u"-1c-" in test_name and
1869           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1870         cores = u"2t1c"
1871     elif u"4t2c" in test_name or \
1872          (u"-2c-" in test_name and
1873           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1874         cores = u"4t2c"
1875     elif u"8t4c" in test_name or \
1876          (u"-4c-" in test_name and
1877           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1878         cores = u"8t4c"
1879     else:
1880         cores = u""
1881
1882     if u"testpmd" in test_name:
1883         driver = u"testpmd"
1884     elif u"l3fwd" in test_name:
1885         driver = u"l3fwd"
1886     elif u"avf" in test_name:
1887         driver = u"avf"
1888     elif u"rdma" in test_name:
1889         driver = u"rdma"
1890     elif u"dnv" in testbed or u"tsh" in testbed:
1891         driver = u"ixgbe"
1892     else:
1893         driver = u"dpdk"
1894
1895     if u"acl" in test_name or \
1896             u"macip" in test_name or \
1897             u"nat" in test_name or \
1898             u"policer" in test_name or \
1899             u"cop" in test_name:
1900         bsf = u"features"
1901     elif u"scale" in test_name:
1902         bsf = u"scale"
1903     elif u"base" in test_name:
1904         bsf = u"base"
1905     else:
1906         bsf = u"base"
1907
1908     if u"114b" in test_name and u"vhost" in test_name:
1909         domain = u"vts"
1910     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1911         domain = u"dpdk"
1912     elif u"memif" in test_name:
1913         domain = u"container_memif"
1914     elif u"srv6" in test_name:
1915         domain = u"srv6"
1916     elif u"vhost" in test_name:
1917         domain = u"vhost"
1918         if u"vppl2xc" in test_name:
1919             driver += u"-vpp"
1920         else:
1921             driver += u"-testpmd"
1922         if u"lbvpplacp" in test_name:
1923             bsf += u"-link-bonding"
1924     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1925         domain = u"nf_service_density_vnfc"
1926     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1927         domain = u"nf_service_density_cnfc"
1928     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1929         domain = u"nf_service_density_cnfp"
1930     elif u"ipsec" in test_name:
1931         domain = u"ipsec"
1932         if u"sw" in test_name:
1933             bsf += u"-sw"
1934         elif u"hw" in test_name:
1935             bsf += u"-hw"
1936     elif u"ethip4vxlan" in test_name:
1937         domain = u"ip4_tunnels"
1938     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1939         domain = u"ip4"
1940     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1941         domain = u"ip6"
1942     elif u"l2xcbase" in test_name or \
1943             u"l2xcscale" in test_name or \
1944             u"l2bdbasemaclrn" in test_name or \
1945             u"l2bdscale" in test_name or \
1946             u"l2patch" in test_name:
1947         domain = u"l2"
1948     else:
1949         domain = u""
1950
1951     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1952     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1953
1954     return file_name + anchor_name
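    # For illustration (hypothetical inputs):
    #   _generate_url(u"2n-skx",
    #                 u"10ge2p1x710-64b-2t1c-avf-ethip4-ip4base-ndrpdr")
    # resolves to nic="x710", frame_size="64b", cores="2t1c", driver="avf",
    # bsf="base", domain="ip4" and returns
    # "ip4-2n-skx-x710.html#64b-2t1c-base-avf".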
1955
1956
1957 def table_perf_trending_dash_html(table, input_data):
1958     """Generate the table(s) with algorithm:
1959     table_perf_trending_dash_html specified in the specification
1960     file.
1961
1962     :param table: Table to generate.
1963     :param input_data: Data to process.
1964     :type table: dict
1965     :type input_data: InputData
1966     """
1967
1968     _ = input_data
1969
1970     if not table.get(u"testbed", None):
1971         logging.error(
1972             f"The testbed is not defined for the table "
1973             f"{table.get(u'title', u'')}."
1974         )
1975         return
1976
1977     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1978
1979     try:
1980         with open(table[u"input-file"], u'rt') as csv_file:
1981             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1982     except KeyError:
1983         logging.warning(u"The input file is not defined.")
1984         return
1985     except csv.Error as err:
1986         logging.warning(
1987             f"Not possible to process the file {table[u'input-file']}.\n"
1988             f"{repr(err)}"
1989         )
1990         return
1991
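    # The input CSV is expected to be the dashboard written by
    # table_perf_trending_dash above: a header row ("Test Case",
    # "Trend [Mpps]", "Short-Term Change [%]", "Long-Term Change [%]",
    # "Regressions [#]", "Progressions [#]") followed by one row per test.
    # The columns with index 4 and 5 drive the row colouring below.
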
1992     # Table:
1993     dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1994
1995     # Table header:
1996     trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1997     for idx, item in enumerate(csv_lst[0]):
1998         alignment = u"left" if idx == 0 else u"center"
1999         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
2000         thead.text = item
2001
2002     # Rows:
2003     colors = {
2004         u"regression": (
2005             u"#ffcccc",
2006             u"#ff9999"
2007         ),
2008         u"progression": (
2009             u"#c6ecc6",
2010             u"#9fdf9f"
2011         ),
2012         u"normal": (
2013             u"#e9f1fb",
2014             u"#d4e4f7"
2015         )
2016     }
2017     for r_idx, row in enumerate(csv_lst[1:]):
2018         if int(row[4]):
2019             color = u"regression"
2020         elif int(row[5]):
2021             color = u"progression"
2022         else:
2023             color = u"normal"
2024         trow = ET.SubElement(
2025             dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
2026         )
2027
2028         # Columns:
2029         for c_idx, item in enumerate(row):
2030             tdata = ET.SubElement(
2031                 trow,
2032                 u"td",
2033                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
2034             )
2035             # Name:
2036             if c_idx == 0:
2037                 ref = ET.SubElement(
2038                     tdata,
2039                     u"a",
2040                     attrib=dict(
2041                         href=f"../trending/"
2042                              f"{_generate_url(table.get(u'testbed', u''), item)}"
2043                     )
2044                 )
2045                 ref.text = item
2046             else:
2047                 tdata.text = item
2048     try:
2049         with open(table[u"output-file"], u'w') as html_file:
2050             logging.info(f"    Writing file: {table[u'output-file']}")
2051             html_file.write(u".. raw:: html\n\n\t")
2052             html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
2053             html_file.write(u"\n\t<p><br><br></p>\n")
2054     except KeyError:
2055         logging.warning(u"The output file is not defined.")
2056         return
2057
2058
2059 def table_last_failed_tests(table, input_data):
2060     """Generate the table(s) with algorithm: table_last_failed_tests
2061     specified in the specification file.
2062
2063     :param table: Table to generate.
2064     :param input_data: Data to process.
2065     :type table: pandas.Series
2066     :type input_data: InputData
2067     """
2068
2069     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2070
2071     # Transform the data
2072     logging.info(
2073         f"    Creating the data set for the {table.get(u'type', u'')} "
2074         f"{table.get(u'title', u'')}."
2075     )
2076
2077     data = input_data.filter_data(table, continue_on_error=True)
2078
2079     if data is None or data.empty:
2080         logging.warning(
2081             f"    No data for the {table.get(u'type', u'')} "
2082             f"{table.get(u'title', u'')}."
2083         )
2084         return
2085
2086     tbl_list = list()
2087     for job, builds in table[u"data"].items():
2088         for build in builds:
2089             build = str(build)
2090             try:
2091                 version = input_data.metadata(job, build).get(u"version", u"")
2092             except KeyError:
2093                 logging.error(f"Data for {job}: {build} is not present.")
2094                 return
2095             tbl_list.append(build)
2096             tbl_list.append(version)
2097             failed_tests = list()
2098             passed = 0
2099             failed = 0
2100             for tst_data in data[job][build].values:
2101                 if tst_data[u"status"] != u"FAIL":
2102                     passed += 1
2103                     continue
2104                 failed += 1
2105                 groups = re.search(REGEX_NIC, tst_data[u"parent"])
2106                 if not groups:
2107                     continue
2108                 nic = groups.group(0)
2109                 failed_tests.append(f"{nic}-{tst_data[u'name']}")
2110             tbl_list.append(str(passed))
2111             tbl_list.append(str(failed))
2112             tbl_list.extend(failed_tests)
2113
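    # The resulting file is a flat list, one entry per line and per build:
    # build id, version, number of passed tests, number of failed tests,
    # followed by "<nic>-<test name>" for every failed test.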
2114     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2115     logging.info(f"    Writing file: {file_name}")
2116     with open(file_name, u"wt") as file_handler:
2117         for test in tbl_list:
2118             file_handler.write(test + u'\n')
2119
2120
2121 def table_failed_tests(table, input_data):
2122     """Generate the table(s) with algorithm: table_failed_tests
2123     specified in the specification file.
2124
2125     :param table: Table to generate.
2126     :param input_data: Data to process.
2127     :type table: pandas.Series
2128     :type input_data: InputData
2129     """
2130
2131     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2132
2133     # Transform the data
2134     logging.info(
2135         f"    Creating the data set for the {table.get(u'type', u'')} "
2136         f"{table.get(u'title', u'')}."
2137     )
2138     data = input_data.filter_data(table, continue_on_error=True)
2139
2140     # Prepare the header of the tables
2141     header = [
2142         u"Test Case",
2143         u"Failures [#]",
2144         u"Last Failure [Time]",
2145         u"Last Failure [VPP-Build-Id]",
2146         u"Last Failure [CSIT-Job-Build-Id]"
2147     ]
2148
2149     # Generate the data for the table according to the model in the table
2150     # specification
2151
2152     now = dt.utcnow()
2153     timeperiod = timedelta(int(table.get(u"window", 7)))
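    # Only builds whose "generated" timestamp (format "%Y%m%d %H:%M") falls
    # within the last "window" days (default 7) are counted below.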
2154
2155     tbl_dict = dict()
2156     for job, builds in table[u"data"].items():
2157         for build in builds:
2158             build = str(build)
2159             for tst_name, tst_data in data[job][build].items():
2160                 if tst_name.lower() in table.get(u"ignore-list", list()):
2161                     continue
2162                 if tbl_dict.get(tst_name, None) is None:
2163                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
2164                     if not groups:
2165                         continue
2166                     nic = groups.group(0)
2167                     tbl_dict[tst_name] = {
2168                         u"name": f"{nic}-{tst_data[u'name']}",
2169                         u"data": OrderedDict()
2170                     }
2171                 try:
2172                     generated = input_data.metadata(job, build).\
2173                         get(u"generated", u"")
2174                     if not generated:
2175                         continue
2176                     then = dt.strptime(generated, u"%Y%m%d %H:%M")
2177                     if (now - then) <= timeperiod:
2178                         tbl_dict[tst_name][u"data"][build] = (
2179                             tst_data[u"status"],
2180                             generated,
2181                             input_data.metadata(job, build).get(u"version",
2182                                                                 u""),
2183                             build
2184                         )
2185                 except (TypeError, KeyError) as err:
2186                     logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
2187
2188     max_fails = 0
2189     tbl_lst = list()
2190     for tst_data in tbl_dict.values():
2191         fails_nr = 0
2192         fails_last_date = u""
2193         fails_last_vpp = u""
2194         fails_last_csit = u""
2195         for val in tst_data[u"data"].values():
2196             if val[0] == u"FAIL":
2197                 fails_nr += 1
2198                 fails_last_date = val[1]
2199                 fails_last_vpp = val[2]
2200                 fails_last_csit = val[3]
2201         if fails_nr:
2202             max_fails = fails_nr if fails_nr > max_fails else max_fails
2203             tbl_lst.append(
2204                 [
2205                     tst_data[u"name"],
2206                     fails_nr,
2207                     fails_last_date,
2208                     fails_last_vpp,
2209                     f"mrr-daily-build-{fails_last_csit}"
2210                 ]
2211             )
2212
2213     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
2214     tbl_sorted = list()
2215     for nrf in range(max_fails, -1, -1):
2216         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
2217         tbl_sorted.extend(tbl_fails)
2218
2219     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2220     logging.info(f"    Writing file: {file_name}")
2221     with open(file_name, u"wt") as file_handler:
2222         file_handler.write(u",".join(header) + u"\n")
2223         for test in tbl_sorted:
2224             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
2225
2226     logging.info(f"    Writing file: {table[u'output-file']}.txt")
2227     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
2228
2229
2230 def table_failed_tests_html(table, input_data):
2231     """Generate the table(s) with algorithm: table_failed_tests_html
2232     specified in the specification file.
2233
2234     :param table: Table to generate.
2235     :param input_data: Data to process.
2236     :type table: pandas.Series
2237     :type input_data: InputData
2238     """
2239
2240     _ = input_data
2241
2242     if not table.get(u"testbed", None):
2243         logging.error(
2244             f"The testbed is not defined for the table "
2245             f"{table.get(u'title', u'')}."
2246         )
2247         return
2248
2249     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2250
2251     try:
2252         with open(table[u"input-file"], u'rt') as csv_file:
2253             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2254     except KeyError:
2255         logging.warning(u"The input file is not defined.")
2256         return
2257     except csv.Error as err:
2258         logging.warning(
2259             f"Not possible to process the file {table[u'input-file']}.\n"
2260             f"{repr(err)}"
2261         )
2262         return
2263
2264     # Table:
2265     failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2266
2267     # Table header:
2268     trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2269     for idx, item in enumerate(csv_lst[0]):
2270         alignment = u"left" if idx == 0 else u"center"
2271         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
2272         thead.text = item
2273
2274     # Rows:
2275     colors = (u"#e9f1fb", u"#d4e4f7")
2276     for r_idx, row in enumerate(csv_lst[1:]):
2277         background = colors[r_idx % 2]
2278         trow = ET.SubElement(
2279             failed_tests, u"tr", attrib=dict(bgcolor=background)
2280         )
2281
2282         # Columns:
2283         for c_idx, item in enumerate(row):
2284             tdata = ET.SubElement(
2285                 trow,
2286                 u"td",
2287                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
2288             )
2289             # Name:
2290             if c_idx == 0:
2291                 ref = ET.SubElement(
2292                     tdata,
2293                     u"a",
2294                     attrib=dict(
2295                         href=f"../trending/"
2296                              f"{_generate_url(table.get(u'testbed', u''), item)}"
2297                     )
2298                 )
2299                 ref.text = item
2300             else:
2301                 tdata.text = item
2302     try:
2303         with open(table[u"output-file"], u'w') as html_file:
2304             logging.info(f"    Writing file: {table[u'output-file']}")
2305             html_file.write(u".. raw:: html\n\n\t")
2306             html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2307             html_file.write(u"\n\t<p><br><br></p>\n")
2308     except KeyError:
2309         logging.warning(u"The output file is not defined.")
2310         return