Report: Comparison tables - Legend
[csit.git] resources/tools/presentation/generator_tables.py
1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32 from yaml import load, FullLoader, YAMLError
33
34 from pal_utils import mean, stdev, classify_anomalies, \
35     convert_csv_to_pretty_txt, relative_change_stdev
36
37
38 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
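# Illustrative note (not part of the original source): REGEX_NIC is meant to
# match NIC codes embedded in CSIT-style suite/test names.  With a purely
# illustrative name:
#
#     >>> re.search(REGEX_NIC, u"10ge2p1x710-ethip4-ip4base-ndrpdr").group(0)
#     '10ge2p1x710'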
39
40
41 def generate_tables(spec, data):
42     """Generate all tables specified in the specification file.
43
44     :param spec: Specification read from the specification file.
45     :param data: Data to process.
46     :type spec: Specification
47     :type data: InputData
48     """
49
50     generator = {
51         u"table_merged_details": table_merged_details,
52         u"table_perf_comparison": table_perf_comparison,
53         u"table_perf_comparison_nic": table_perf_comparison_nic,
54         u"table_nics_comparison": table_nics_comparison,
55         u"table_soak_vs_ndr": table_soak_vs_ndr,
56         u"table_perf_trending_dash": table_perf_trending_dash,
57         u"table_perf_trending_dash_html": table_perf_trending_dash_html,
58         u"table_last_failed_tests": table_last_failed_tests,
59         u"table_failed_tests": table_failed_tests,
60         u"table_failed_tests_html": table_failed_tests_html,
61         u"table_oper_data_html": table_oper_data_html
62     }
63
64     logging.info(u"Generating the tables ...")
65     for table in spec.tables:
66         try:
67             generator[table[u"algorithm"]](table, data)
68         except NameError as err:
69             logging.error(
70                 f"Probably algorithm {table[u'algorithm']} is not defined: "
71                 f"{repr(err)}"
72             )
73     logging.info(u"Done.")
74
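# Illustrative note (not part of the original source): each entry of
# spec.tables is expected to be a mapping whose u"algorithm" value names one
# of the generators registered in generate_tables() above; that generator is
# then called with the table specification and the input data.  A hypothetical
# fragment of such an entry:
#
#     table_spec = {
#         u"algorithm": u"table_perf_comparison",
#         u"title": u"Performance comparison",
#         u"output-file": u"_build/perf_comparison",
#         # ... further keys required by the selected generator ...
#     }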
75
76 def table_oper_data_html(table, input_data):
77     """Generate the table(s) with algorithm: html_table_oper_data
78     specified in the specification file.
79
80     :param table: Table to generate.
81     :param input_data: Data to process.
82     :type table: pandas.Series
83     :type input_data: InputData
84     """
85
86     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
87     # Transform the data
88     logging.info(
89         f"    Creating the data set for the {table.get(u'type', u'')} "
90         f"{table.get(u'title', u'')}."
91     )
92     data = input_data.filter_data(
93         table,
94         params=[u"name", u"parent", u"show-run", u"type"],
95         continue_on_error=True
96     )
97     if data.empty:
98         return
99     data = input_data.merge_data(data)
100
101     sort_tests = table.get(u"sort", None)
102     if sort_tests:
103         args = dict(
104             inplace=True,
105             ascending=(sort_tests == u"ascending")
106         )
107         data.sort_index(**args)
108
109     suites = input_data.filter_data(
110         table,
111         continue_on_error=True,
112         data_set=u"suites"
113     )
114     if suites.empty:
115         return
116     suites = input_data.merge_data(suites)
117
118     def _generate_html_table(tst_data):
119         """Generate an HTML table with operational data for the given test.
120
121         :param tst_data: Test data to be used to generate the table.
122         :type tst_data: pandas.Series
123         :returns: HTML table with operational data.
124         :rtype: str
125         """
126
127         colors = {
128             u"header": u"#7eade7",
129             u"empty": u"#ffffff",
130             u"body": (u"#e9f1fb", u"#d4e4f7")
131         }
132
133         tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
134
135         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
136         thead = ET.SubElement(
137             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
138         )
139         thead.text = tst_data[u"name"]
140
141         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
142         thead = ET.SubElement(
143             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
144         )
145         thead.text = u"\t"
146
147         if tst_data.get(u"show-run", u"No Data") == u"No Data":
148             trow = ET.SubElement(
149                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
150             )
151             tcol = ET.SubElement(
152                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
153             )
154             tcol.text = u"No Data"
155
156             trow = ET.SubElement(
157                 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
158             )
159             thead = ET.SubElement(
160                 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
161             )
162             font = ET.SubElement(
163                 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
164             )
165             font.text = u"."
166             return str(ET.tostring(tbl, encoding=u"unicode"))
167
168         tbl_hdr = (
169             u"Name",
170             u"Nr of Vectors",
171             u"Nr of Packets",
172             u"Suspends",
173             u"Cycles per Packet",
174             u"Average Vector Size"
175         )
176
177         for dut_data in tst_data[u"show-run"].values():
178             trow = ET.SubElement(
179                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
180             )
181             tcol = ET.SubElement(
182                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
183             )
184             if dut_data.get(u"threads", None) is None:
185                 tcol.text = u"No Data"
186                 continue
187
188             bold = ET.SubElement(tcol, u"b")
189             bold.text = (
190                 f"Host IP: {dut_data.get(u'host', '')}, "
191                 f"Socket: {dut_data.get(u'socket', '')}"
192             )
193             trow = ET.SubElement(
194                 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
195             )
196             thead = ET.SubElement(
197                 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
198             )
199             thead.text = u"\t"
200
201             for thread_nr, thread in dut_data[u"threads"].items():
202                 trow = ET.SubElement(
203                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
204                 )
205                 tcol = ET.SubElement(
206                     trow, u"td", attrib=dict(align=u"left", colspan=u"6")
207                 )
208                 bold = ET.SubElement(tcol, u"b")
209                 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
210                 trow = ET.SubElement(
211                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
212                 )
213                 for idx, col in enumerate(tbl_hdr):
214                     tcol = ET.SubElement(
215                         trow, u"td",
216                         attrib=dict(align=u"right" if idx else u"left")
217                     )
218                     font = ET.SubElement(
219                         tcol, u"font", attrib=dict(size=u"2")
220                     )
221                     bold = ET.SubElement(font, u"b")
222                     bold.text = col
223                 for row_nr, row in enumerate(thread):
224                     trow = ET.SubElement(
225                         tbl, u"tr",
226                         attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
227                     )
228                     for idx, col in enumerate(row):
229                         tcol = ET.SubElement(
230                             trow, u"td",
231                             attrib=dict(align=u"right" if idx else u"left")
232                         )
233                         font = ET.SubElement(
234                             tcol, u"font", attrib=dict(size=u"2")
235                         )
236                         if isinstance(col, float):
237                             font.text = f"{col:.2f}"
238                         else:
239                             font.text = str(col)
240                 trow = ET.SubElement(
241                     tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
242                 )
243                 thead = ET.SubElement(
244                     trow, u"th", attrib=dict(align=u"left", colspan=u"6")
245                 )
246                 thead.text = u"\t"
247
248         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
249         thead = ET.SubElement(
250             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
251         )
252         font = ET.SubElement(
253             thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
254         )
255         font.text = u"."
256
257         return str(ET.tostring(tbl, encoding=u"unicode"))
258
259     for suite in suites.values:
260         html_table = str()
261         for test_data in data.values:
262             if test_data[u"parent"] not in suite[u"name"]:
263                 continue
264             html_table += _generate_html_table(test_data)
265         if not html_table:
266             continue
267         try:
268             file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
269             with open(f"{file_name}", u'w') as html_file:
270                 logging.info(f"    Writing file: {file_name}")
271                 html_file.write(u".. raw:: html\n\n\t")
272                 html_file.write(html_table)
273                 html_file.write(u"\n\t<p><br><br></p>\n")
274         except KeyError:
275             logging.warning(u"The output file is not defined.")
276             return
277     logging.info(u"  Done.")
278
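# Illustrative note (not part of the original source): for every matching
# suite the function above writes one reStructuredText file named
# "{output-file}{suite-name}.rst" whose body is the generated HTML wrapped in
# a ".. raw:: html" directive.  A hypothetical minimal spec entry:
#
#     table_spec = {
#         u"algorithm": u"table_oper_data_html",
#         u"title": u"VPP Operational Data",
#         u"output-file": u"_build/oper_data_",
#         # data-selection keys consumed by InputData.filter_data() omitted
#     }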
279
280 def table_merged_details(table, input_data):
281     """Generate the table(s) with algorithm: table_merged_details
282     specified in the specification file.
283
284     :param table: Table to generate.
285     :param input_data: Data to process.
286     :type table: pandas.Series
287     :type input_data: InputData
288     """
289
290     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
291
292     # Transform the data
293     logging.info(
294         f"    Creating the data set for the {table.get(u'type', u'')} "
295         f"{table.get(u'title', u'')}."
296     )
297     data = input_data.filter_data(table, continue_on_error=True)
298     data = input_data.merge_data(data)
299
300     sort_tests = table.get(u"sort", None)
301     if sort_tests:
302         args = dict(
303             inplace=True,
304             ascending=(sort_tests == u"ascending")
305         )
306         data.sort_index(**args)
307
308     suites = input_data.filter_data(
309         table, continue_on_error=True, data_set=u"suites")
310     suites = input_data.merge_data(suites)
311
312     # Prepare the header of the tables
313     header = list()
314     for column in table[u"columns"]:
315         header.append(
316             u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
317         )
318
319     for suite in suites.values:
320         # Generate data
321         suite_name = suite[u"name"]
322         table_lst = list()
323         for test in data.keys():
324             if data[test][u"parent"] not in suite_name:
325                 continue
326             row_lst = list()
327             for column in table[u"columns"]:
328                 try:
329                     col_data = str(data[test][column[
330                         u"data"].split(u" ")[1]]).replace(u'"', u'""')
331                     # Do not include tests with "Test Failed" in the message
332                     if u"Test Failed" in col_data:
333                         continue
334                     col_data = col_data.replace(
335                         u"No Data", u"Not Captured     "
336                     )
337                     if column[u"data"].split(u" ")[1] in (u"name", ):
338                         if len(col_data) > 30:
339                             col_data_lst = col_data.split(u"-")
340                             half = int(len(col_data_lst) / 2)
341                             col_data = f"{u'-'.join(col_data_lst[:half])}" \
342                                        f"- |br| " \
343                                        f"{u'-'.join(col_data_lst[half:])}"
344                         col_data = f" |prein| {col_data} |preout| "
345                     elif column[u"data"].split(u" ")[1] in (u"msg", ):
346                         # Temporary solution: remove NDR results from message:
347                         if bool(table.get(u'remove-ndr', False)):
348                             try:
349                                 col_data = col_data.split(u" |br| ", 1)[1]
350                             except IndexError:
351                                 pass
352                         col_data = f" |prein| {col_data} |preout| "
353                     elif column[u"data"].split(u" ")[1] in \
354                             (u"conf-history", u"show-run"):
355                         col_data = col_data.replace(u" |br| ", u"", 1)
356                         col_data = f" |prein| {col_data[:-5]} |preout| "
357                     row_lst.append(f'"{col_data}"')
358                 except KeyError:
359                     row_lst.append(u'"Not captured"')
360             if len(row_lst) == len(table[u"columns"]):
361                 table_lst.append(row_lst)
362
363         # Write the data to file
364         if table_lst:
365             separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
366             file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
367             logging.info(f"      Writing file: {file_name}")
368             with open(file_name, u"wt") as file_handler:
369                 file_handler.write(u",".join(header) + u"\n")
370                 for item in table_lst:
371                     file_handler.write(u",".join(item) + u"\n")
372
373     logging.info(u"  Done.")
374
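# Illustrative note (not part of the original source): table_merged_details()
# above expects each entry of table[u"columns"] to carry a u"title" used for
# the CSV header and a u"data" string whose second word selects the test
# field.  Hypothetical column definitions:
#
#     columns = [
#         {u"title": u"Name", u"data": u"data name"},
#         {u"title": u"Status", u"data": u"data msg"},
#         {u"title": u"Show Run", u"data": u"data show-run"},
#     ]
#
# The field name is taken from column[u"data"].split(u" ")[1], so the leading
# word (u"data" in this sketch) is only a prefix and is ignored.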
375
376 def _tpc_modify_test_name(test_name):
377     """Modify a test name by replacing its parts.
378
379     :param test_name: Test name to be modified.
380     :type test_name: str
381     :returns: Modified test name.
382     :rtype: str
383     """
384     test_name_mod = test_name.\
385         replace(u"-ndrpdrdisc", u""). \
386         replace(u"-ndrpdr", u"").\
387         replace(u"-pdrdisc", u""). \
388         replace(u"-ndrdisc", u"").\
389         replace(u"-pdr", u""). \
390         replace(u"-ndr", u""). \
391         replace(u"1t1c", u"1c").\
392         replace(u"2t1c", u"1c"). \
393         replace(u"2t2c", u"2c").\
394         replace(u"4t2c", u"2c"). \
395         replace(u"4t4c", u"4c").\
396         replace(u"8t4c", u"4c")
397
398     return re.sub(REGEX_NIC, u"", test_name_mod)
399
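# Illustrative note (not part of the original source): _tpc_modify_test_name()
# normalizes a CSIT-style test name by dropping the result-type suffix,
# collapsing the thread/core token and stripping the NIC code.  With a purely
# illustrative input name:
#
#     >>> _tpc_modify_test_name(u"10ge2p1x710-64b-2t1c-ethip4-ip4base-ndrpdr")
#     '-64b-1c-ethip4-ip4base'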
400
401 def _tpc_modify_displayed_test_name(test_name):
402     """Modify a test name which is displayed in a table by replacing its parts.
403
404     :param test_name: Test name to be modified.
405     :type test_name: str
406     :returns: Modified test name.
407     :rtype: str
408     """
409     return test_name.\
410         replace(u"1t1c", u"1c").\
411         replace(u"2t1c", u"1c"). \
412         replace(u"2t2c", u"2c").\
413         replace(u"4t2c", u"2c"). \
414         replace(u"4t4c", u"4c").\
415         replace(u"8t4c", u"4c")
416
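# Illustrative note (not part of the original source): unlike the helper
# above, _tpc_modify_displayed_test_name() only collapses the thread/core
# token and keeps the rest of the displayed name intact, e.g.:
#
#     >>> _tpc_modify_displayed_test_name(u"x710-64b-2t1c-ethip4-ip4base")
#     'x710-64b-1c-ethip4-ip4base'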
417
418 def _tpc_insert_data(target, src, include_tests):
419     """Insert src data to the target structure.
420
421     :param target: Target structure where the data is placed.
422     :param src: Source data to be placed into the target stucture.
423     :param include_tests: Which results will be included (MRR, NDR, PDR).
424     :type target: list
425     :type src: dict
426     :type include_tests: str
427     """
428     try:
429         if include_tests == u"MRR":
430             target.append(
431                 (
432                     src[u"result"][u"receive-rate"],
433                     src[u"result"][u"receive-stdev"]
434                 )
435             )
436         elif include_tests == u"PDR":
437             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
438         elif include_tests == u"NDR":
439             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
440     except (KeyError, TypeError):
441         pass
442
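# Illustrative note (not part of the original source): the shape of the
# appended value depends on include_tests; MRR appends a (rate, stdev) tuple
# while NDR and PDR append a single lower-bound throughput value.  With
# hypothetical result dicts:
#
#     >>> target = []
#     >>> _tpc_insert_data(
#     ...     target,
#     ...     {u"result": {u"receive-rate": 12.5e6, u"receive-stdev": 0.2e6}},
#     ...     u"MRR"
#     ... )
#     >>> _tpc_insert_data(
#     ...     target, {u"throughput": {u"NDR": {u"LOWER": 10.0e6}}}, u"NDR"
#     ... )
#     >>> target
#     [(12500000.0, 200000.0), 10000000.0]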
443
444 def _tpc_sort_table(table):
445     """Sort the table this way:
446
447     1. Put "New in CSIT-XXXX" at the first place.
448     2. Put "See footnote" at the second place.
449     3. Sort the rest by "Delta".
450
451     :param table: Table to sort.
452     :type table: list
453     :returns: Sorted table.
454     :rtype: list
455     """
456
457     tbl_new = list()
458     tbl_see = list()
459     tbl_delta = list()
460     for item in table:
461         if isinstance(item[-1], str):
462             if u"New in CSIT" in item[-1]:
463                 tbl_new.append(item)
464             elif u"See footnote" in item[-1]:
465                 tbl_see.append(item)
466         else:
467             tbl_delta.append(item)
468
469     # Sort the tables:
470     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
471     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
472     tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
473     tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
474     tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
475
476     # Put the tables together:
477     table = list()
478     # We do not want "New in CSIT":
479     # table.extend(tbl_new)
480     table.extend(tbl_see)
481     table.extend(tbl_delta)
482
483     return table
484
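# Illustrative note (not part of the original source): rows are expected to
# end with the delta at index -2 and either a numeric stdev or a textual flag
# at index -1; "New in CSIT" rows are collected but currently left out of the
# result.  With hypothetical rows:
#
#     >>> rows = [
#     ...     [u"test-a", 10.0, 0.1, 12.0, 0.1, 20, 2],
#     ...     [u"test-b", 10.0, 0.1, 9.0, 0.1, -10, 1],
#     ...     [u"test-c", u"NT", u"NT", 5.0, 0.1,
#     ...      u"New in CSIT-2001", u"New in CSIT-2001"],
#     ...     [u"test-d", 10.0, 0.1, 10.0, 0.1, 0, u"See footnote [1]"],
#     ... ]
#     >>> [row[0] for row in _tpc_sort_table(rows)]
#     ['test-d', 'test-a', 'test-b']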
485
486 def _tpc_generate_html_table(header, data, output_file_name):
487     """Generate html table from input data with simple sorting possibility.
488
489     :param header: Table header.
490     :param data: Input data to be included in the table. It is a list of lists.
491         Inner lists are rows in the table. All inner lists must be of the same
492         length. The length of these lists must be the same as the length of the
493         header.
494     :param output_file_name: The file name (relative or full path) where
495         the generated HTML table is written.
496     :type header: list
497     :type data: list of lists
498     :type output_file_name: str
499     """
500
501     try:
502         idx = header.index(u"Test Case")
503     except ValueError:
504         idx = 0
505     params = {
506         u"align-hdr": ([u"left", u"center"], [u"left", u"left", u"center"]),
507         u"align-itm": ([u"left", u"right"], [u"left", u"left", u"right"]),
508         u"width": ([28, 9], [4, 24, 10])
509     }
510
511     df_data = pd.DataFrame(data, columns=header)
512
513     df_sorted = [df_data.sort_values(
514         by=[key, header[idx]], ascending=[True, True]
515         if key != header[idx] else [False, True]) for key in header]
516     df_sorted_rev = [df_data.sort_values(
517         by=[key, header[idx]], ascending=[False, True]
518         if key != header[idx] else [True, True]) for key in header]
519     df_sorted.extend(df_sorted_rev)
520
521     fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
522                    for idx in range(len(df_data))]]
523     table_header = dict(
524         values=[f"<b>{item}</b>" for item in header],
525         fill_color=u"#7eade7",
526         align=params[u"align-hdr"][idx]
527     )
528
529     fig = go.Figure()
530
531     for table in df_sorted:
532         columns = [table.get(col) for col in header]
533         fig.add_trace(
534             go.Table(
535                 columnwidth=params[u"width"][idx],
536                 header=table_header,
537                 cells=dict(
538                     values=columns,
539                     fill_color=fill_color,
540                     align=params[u"align-itm"][idx]
541                 )
542             )
543         )
544
545     buttons = list()
546     menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
547     menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
548     menu_items.extend(menu_items_rev)
549     for idx, hdr in enumerate(menu_items):
550         visible = [False, ] * len(menu_items)
551         visible[idx] = True
552         buttons.append(
553             dict(
554                 label=hdr.replace(u" [Mpps]", u""),
555                 method=u"update",
556                 args=[{u"visible": visible}],
557             )
558         )
559
560     fig.update_layout(
561         updatemenus=[
562             go.layout.Updatemenu(
563                 type=u"dropdown",
564                 direction=u"down",
565                 x=0.03,
566                 xanchor=u"left",
567                 y=1.045,
568                 yanchor=u"top",
569                 active=len(menu_items) - 1,
570                 buttons=list(buttons)
571             )
572         ],
573         annotations=[
574             go.layout.Annotation(
575                 text=u"<b>Sort by:</b>",
576                 x=0,
577                 xref=u"paper",
578                 y=1.035,
579                 yref=u"paper",
580                 align=u"left",
581                 showarrow=False
582             )
583         ]
584     )
585
586     ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
587
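# Illustrative note (not part of the original source): a minimal call of the
# helper above, assuming the header/data layout described in its docstring;
# the output file name is hypothetical:
#
#     >>> _tpc_generate_html_table(
#     ...     header=[u"Test Case", u"Diff"],
#     ...     data=[[u"ip4base", 2], [u"l2xcbase", -1]],
#     ...     output_file_name=u"/tmp/example_table.html"
#     ... )
#
# One table trace is produced per header column in ascending and descending
# order, selectable via the generated "Sort by" drop-down menu.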
588
589 def table_perf_comparison(table, input_data):
590     """Generate the table(s) with algorithm: table_perf_comparison
591     specified in the specification file.
592
593     :param table: Table to generate.
594     :param input_data: Data to process.
595     :type table: pandas.Series
596     :type input_data: InputData
597     """
598
599     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
600
601     # Transform the data
602     logging.info(
603         f"    Creating the data set for the {table.get(u'type', u'')} "
604         f"{table.get(u'title', u'')}."
605     )
606     data = input_data.filter_data(table, continue_on_error=True)
607
608     # Prepare the header of the tables
609     try:
610         header = [u"Test Case", ]
611         legend = u"\nLegend:\n"
612
613         rca_data = None
614         rca = table.get(u"rca", None)
615         if rca:
616             try:
617                 with open(rca.get(u"data-file", ""), u"r") as rca_file:
618                     rca_data = load(rca_file, Loader=FullLoader)
619                 header.insert(0, rca.get(u"title", "RCA"))
620                 legend += (
621                     u"RCA: Reference to the Root Cause Analysis, see below.\n"
622                 )
623             except (YAMLError, IOError) as err:
624                 logging.warning(repr(err))
625
626         history = table.get(u"history", list())
627         for item in history:
628             header.extend(
629                 [
630                     f"{item[u'title']} Avg({table[u'include-tests']})",
631                     f"{item[u'title']} Stdev({table[u'include-tests']})"
632                 ]
633             )
634             legend += (
635                 f"{item[u'title']} Avg({table[u'include-tests']}): "
636                 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
637                 f"a series of runs of the listed tests executed against "
638                 f"rls{item[u'title']}.\n"
639                 f"{item[u'title']} Stdev({table[u'include-tests']}): "
640                 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
641                 f"computed from a series of runs of the listed tests executed "
642                 f"against rls{item[u'title']}.\n"
643             )
644         header.extend(
645             [
646                 f"{table[u'reference'][u'title']} "
647                 f"Avg({table[u'include-tests']})",
648                 f"{table[u'reference'][u'title']} "
649                 f"Stdev({table[u'include-tests']})",
650                 f"{table[u'compare'][u'title']} "
651                 f"Avg({table[u'include-tests']})",
652                 f"{table[u'compare'][u'title']} "
653                 f"Stdev({table[u'include-tests']})",
654                 f"Diff({table[u'reference'][u'title']},"
655                 f"{table[u'compare'][u'title']})",
656                 u"Stdev(Diff)"
657             ]
658         )
659         header_str = u";".join(header) + u"\n"
660         legend += (
661             f"{table[u'reference'][u'title']} "
662             f"Avg({table[u'include-tests']}): "
663             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
664             f"series of runs of the listed tests executed against "
665             f"rls{table[u'reference'][u'title']}.\n"
666             f"{table[u'reference'][u'title']} "
667             f"Stdev({table[u'include-tests']}): "
668             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
669             f"computed from a series of runs of the listed tests executed "
670             f"against rls{table[u'reference'][u'title']}.\n"
671             f"{table[u'compare'][u'title']} "
672             f"Avg({table[u'include-tests']}): "
673             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
674             f"series of runs of the listed tests executed against "
675             f"rls{table[u'compare'][u'title']}.\n"
676             f"{table[u'compare'][u'title']} "
677             f"Stdev({table[u'include-tests']}): "
678             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
679             f"computed from a series of runs of the listed tests executed "
680             f"against rls{table[u'compare'][u'title']}.\n"
681             f"Diff({table[u'reference'][u'title']},"
682             f"{table[u'compare'][u'title']}): "
683             f"Percentage change calculated for mean values.\n"
684             u"Stdev(Diff): "
685             u"Standard deviation of percentage change calculated for mean "
686             u"values.\n"
687             u"NT: Not Tested\n"
688         )
689     except (AttributeError, KeyError) as err:
690         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
691         return
692
693     # Prepare data for the table:
694     tbl_dict = dict()
695     for job, builds in table[u"reference"][u"data"].items():
696         for build in builds:
697             for tst_name, tst_data in data[job][str(build)].items():
698                 tst_name_mod = _tpc_modify_test_name(tst_name)
699                 if (u"across topologies" in table[u"title"].lower() or
700                         (u" 3n-" in table[u"title"].lower() and
701                          u" 2n-" in table[u"title"].lower())):
702                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
703                 if tbl_dict.get(tst_name_mod, None) is None:
704                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
705                     nic = groups.group(0) if groups else u""
706                     name = \
707                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
708                     if u"across testbeds" in table[u"title"].lower() or \
709                             u"across topologies" in table[u"title"].lower():
710                         name = _tpc_modify_displayed_test_name(name)
711                     tbl_dict[tst_name_mod] = {
712                         u"name": name,
713                         u"ref-data": list(),
714                         u"cmp-data": list()
715                     }
716                 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
717                                  src=tst_data,
718                                  include_tests=table[u"include-tests"])
719
720     replacement = table[u"reference"].get(u"data-replacement", None)
721     if replacement:
722         create_new_list = True
723         rpl_data = input_data.filter_data(
724             table, data=replacement, continue_on_error=True)
725         for job, builds in replacement.items():
726             for build in builds:
727                 for tst_name, tst_data in rpl_data[job][str(build)].items():
728                     tst_name_mod = _tpc_modify_test_name(tst_name)
729                     if (u"across topologies" in table[u"title"].lower() or
730                             (u" 3n-" in table[u"title"].lower() and
731                              u" 2n-" in table[u"title"].lower())):
732                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
733                     if tbl_dict.get(tst_name_mod, None) is None:
734                         name = \
735                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
736                         if u"across testbeds" in table[u"title"].lower() or \
737                                 u"across topologies" in table[u"title"].lower():
738                             name = _tpc_modify_displayed_test_name(name)
739                         tbl_dict[tst_name_mod] = {
740                             u"name": name,
741                             u"ref-data": list(),
742                             u"cmp-data": list()
743                         }
744                     if create_new_list:
745                         create_new_list = False
746                         tbl_dict[tst_name_mod][u"ref-data"] = list()
747
748                     _tpc_insert_data(
749                         target=tbl_dict[tst_name_mod][u"ref-data"],
750                         src=tst_data,
751                         include_tests=table[u"include-tests"]
752                     )
753
754     for job, builds in table[u"compare"][u"data"].items():
755         for build in builds:
756             for tst_name, tst_data in data[job][str(build)].items():
757                 tst_name_mod = _tpc_modify_test_name(tst_name)
758                 if (u"across topologies" in table[u"title"].lower() or
759                         (u" 3n-" in table[u"title"].lower() and
760                          u" 2n-" in table[u"title"].lower())):
761                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
762                 if tbl_dict.get(tst_name_mod, None) is None:
763                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
764                     nic = groups.group(0) if groups else u""
765                     name = \
766                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
767                     if u"across testbeds" in table[u"title"].lower() or \
768                             u"across topologies" in table[u"title"].lower():
769                         name = _tpc_modify_displayed_test_name(name)
770                     tbl_dict[tst_name_mod] = {
771                         u"name": name,
772                         u"ref-data": list(),
773                         u"cmp-data": list()
774                     }
775                 _tpc_insert_data(
776                     target=tbl_dict[tst_name_mod][u"cmp-data"],
777                     src=tst_data,
778                     include_tests=table[u"include-tests"]
779                 )
780
781     replacement = table[u"compare"].get(u"data-replacement", None)
782     if replacement:
783         create_new_list = True
784         rpl_data = input_data.filter_data(
785             table, data=replacement, continue_on_error=True)
786         for job, builds in replacement.items():
787             for build in builds:
788                 for tst_name, tst_data in rpl_data[job][str(build)].items():
789                     tst_name_mod = _tpc_modify_test_name(tst_name)
790                     if (u"across topologies" in table[u"title"].lower() or
791                             (u" 3n-" in table[u"title"].lower() and
792                              u" 2n-" in table[u"title"].lower())):
793                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
794                     if tbl_dict.get(tst_name_mod, None) is None:
795                         name = \
796                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
797                         if u"across testbeds" in table[u"title"].lower() or \
798                                 u"across topologies" in table[u"title"].lower():
799                             name = _tpc_modify_displayed_test_name(name)
800                         tbl_dict[tst_name_mod] = {
801                             u"name": name,
802                             u"ref-data": list(),
803                             u"cmp-data": list()
804                         }
805                     if create_new_list:
806                         create_new_list = False
807                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
808
809                     _tpc_insert_data(
810                         target=tbl_dict[tst_name_mod][u"cmp-data"],
811                         src=tst_data,
812                         include_tests=table[u"include-tests"]
813                     )
814
815     for item in history:
816         for job, builds in item[u"data"].items():
817             for build in builds:
818                 for tst_name, tst_data in data[job][str(build)].items():
819                     tst_name_mod = _tpc_modify_test_name(tst_name)
820                     if (u"across topologies" in table[u"title"].lower() or
821                             (u" 3n-" in table[u"title"].lower() and
822                              u" 2n-" in table[u"title"].lower())):
823                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
824                     if tbl_dict.get(tst_name_mod, None) is None:
825                         continue
826                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
827                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
828                     if tbl_dict[tst_name_mod][u"history"].\
829                             get(item[u"title"], None) is None:
830                         tbl_dict[tst_name_mod][u"history"][item[
831                             u"title"]] = list()
832                     try:
833                         if table[u"include-tests"] == u"MRR":
834                             res = (tst_data[u"result"][u"receive-rate"],
835                                    tst_data[u"result"][u"receive-stdev"])
836                         elif table[u"include-tests"] == u"PDR":
837                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
838                         elif table[u"include-tests"] == u"NDR":
839                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
840                         else:
841                             continue
842                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
843                             append(res)
844                     except (TypeError, KeyError):
845                         pass
846
847     tbl_lst = list()
848     for tst_name in tbl_dict:
849         item = [tbl_dict[tst_name][u"name"], ]
850         if history:
851             if tbl_dict[tst_name].get(u"history", None) is not None:
852                 for hist_data in tbl_dict[tst_name][u"history"].values():
853                     if hist_data:
854                         if table[u"include-tests"] == u"MRR":
855                             item.append(round(hist_data[0][0] / 1e6, 1))
856                             item.append(round(hist_data[0][1] / 1e6, 1))
857                         else:
858                             item.append(round(mean(hist_data) / 1e6, 1))
859                             item.append(round(stdev(hist_data) / 1e6, 1))
860                     else:
861                         item.extend([u"NT", u"NT"])
862             else:
863                 item.extend([u"NT", u"NT"])
864         data_r = tbl_dict[tst_name][u"ref-data"]
865         if data_r:
866             if table[u"include-tests"] == u"MRR":
867                 data_r_mean = data_r[0][0]
868                 data_r_stdev = data_r[0][1]
869             else:
870                 data_r_mean = mean(data_r)
871                 data_r_stdev = stdev(data_r)
872             item.append(round(data_r_mean / 1e6, 1))
873             item.append(round(data_r_stdev / 1e6, 1))
874         else:
875             data_r_mean = None
876             data_r_stdev = None
877             item.extend([u"NT", u"NT"])
878         data_c = tbl_dict[tst_name][u"cmp-data"]
879         if data_c:
880             if table[u"include-tests"] == u"MRR":
881                 data_c_mean = data_c[0][0]
882                 data_c_stdev = data_c[0][1]
883             else:
884                 data_c_mean = mean(data_c)
885                 data_c_stdev = stdev(data_c)
886             item.append(round(data_c_mean / 1e6, 1))
887             item.append(round(data_c_stdev / 1e6, 1))
888         else:
889             data_c_mean = None
890             data_c_stdev = None
891             item.extend([u"NT", u"NT"])
892         if item[-2] == u"NT":
893             pass
894         elif item[-4] == u"NT":
895             item.append(u"New in CSIT-2001")
896             item.append(u"New in CSIT-2001")
897         elif data_r_mean is not None and data_c_mean is not None:
898             delta, d_stdev = relative_change_stdev(
899                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
900             )
901             try:
902                 item.append(round(delta))
903             except ValueError:
904                 item.append(delta)
905             try:
906                 item.append(round(d_stdev))
907             except ValueError:
908                 item.append(d_stdev)
909         if rca_data:
910             rca_nr = rca_data.get(item[0], u"-")
911             item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
912         if (len(item) == len(header)) and (item[-4] != u"NT"):
913             tbl_lst.append(item)
914
915     tbl_lst = _tpc_sort_table(tbl_lst)
916
917     # Generate csv tables:
918     csv_file = f"{table[u'output-file']}.csv"
919     with open(csv_file, u"wt") as file_handler:
920         file_handler.write(header_str)
921         for test in tbl_lst:
922             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
923
924     txt_file_name = f"{table[u'output-file']}.txt"
925     convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
926
927     with open(txt_file_name, u'a') as txt_file:
928         txt_file.write(legend)
929         if rca_data:
930             footnote = rca_data.get(u"footnote", u"")
931             if footnote:
932                 txt_file.write(u"\n")
933                 txt_file.write(footnote)
934         txt_file.write(u":END")
935
936     # Generate html table:
937     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
938
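# Illustrative note (not part of the original source): a sketch of the table
# specification consumed by table_perf_comparison() above, reconstructed from
# the keys it accesses; all job names, build numbers and paths are
# hypothetical, and the data-selection keys consumed by
# InputData.filter_data() are omitted:
#
#     table_spec = {
#         u"algorithm": u"table_perf_comparison",
#         u"title": u"Performance comparison 3n-hsw",
#         u"include-tests": u"NDR",   # or u"PDR" / u"MRR"
#         u"reference": {
#             u"title": u"1908",
#             u"data": {u"example-perf-job": [1, 2, 3]},
#         },
#         u"compare": {
#             u"title": u"2001",
#             u"data": {u"example-perf-job": [4, 5, 6]},
#         },
#         u"output-file": u"_build/perf_comparison_3n_hsw",
#     }
#
# Optional keys handled above: u"history" (a list of older releases with the
# same u"title"/u"data" layout), u"rca" (a reference to a YAML file with root
# cause analysis notes) and per-side u"data-replacement".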
939
940 def table_perf_comparison_nic(table, input_data):
941     """Generate the table(s) with algorithm: table_perf_comparison
942     specified in the specification file.
943
944     :param table: Table to generate.
945     :param input_data: Data to process.
946     :type table: pandas.Series
947     :type input_data: InputData
948     """
949
950     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
951
952     # Transform the data
953     logging.info(
954         f"    Creating the data set for the {table.get(u'type', u'')} "
955         f"{table.get(u'title', u'')}."
956     )
957     data = input_data.filter_data(table, continue_on_error=True)
958
959     # Prepare the header of the tables
960     try:
961         header = [u"Test Case", ]
962         legend = u"\nLegend:\n"
963
964         rca_data = None
965         rca = table.get(u"rca", None)
966         if rca:
967             try:
968                 with open(rca.get(u"data-file", ""), u"r") as rca_file:
969                     rca_data = load(rca_file, Loader=FullLoader)
970                 header.insert(0, rca.get(u"title", "RCA"))
971                 legend += (
972                     u"RCA: Reference to the Root Cause Analysis, see below.\n"
973                 )
974             except (YAMLError, IOError) as err:
975                 logging.warning(repr(err))
976
977         history = table.get(u"history", list())
978         for item in history:
979             header.extend(
980                 [
981                     f"{item[u'title']} Avg({table[u'include-tests']})",
982                     f"{item[u'title']} Stdev({table[u'include-tests']})"
983                 ]
984             )
985             legend += (
986                 f"{item[u'title']} Avg({table[u'include-tests']}): "
987                 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
988                 f"a series of runs of the listed tests executed against "
989                 f"rls{item[u'title']}.\n"
990                 f"{item[u'title']} Stdev({table[u'include-tests']}): "
991                 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
992                 f"computed from a series of runs of the listed tests executed "
993                 f"against rls{item[u'title']}.\n"
994             )
995         header.extend(
996             [
997                 f"{table[u'reference'][u'title']} "
998                 f"Avg({table[u'include-tests']})",
999                 f"{table[u'reference'][u'title']} "
1000                 f"Stdev({table[u'include-tests']})",
1001                 f"{table[u'compare'][u'title']} "
1002                 f"Avg({table[u'include-tests']})",
1003                 f"{table[u'compare'][u'title']} "
1004                 f"Stdev({table[u'include-tests']})",
1005                 f"Diff({table[u'reference'][u'title']},"
1006                 f"{table[u'compare'][u'title']})",
1007                 u"Stdev(Diff)"
1008             ]
1009         )
1010         header_str = u";".join(header) + u"\n"
1011         legend += (
1012             f"{table[u'reference'][u'title']} "
1013             f"Avg({table[u'include-tests']}): "
1014             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1015             f"series of runs of the listed tests executed against "
1016             f"rls{table[u'reference'][u'title']}.\n"
1017             f"{table[u'reference'][u'title']} "
1018             f"Stdev({table[u'include-tests']}): "
1019             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1020             f"computed from a series of runs of the listed tests executed "
1021             f"against rls{table[u'reference'][u'title']}.\n"
1022             f"{table[u'compare'][u'title']} "
1023             f"Avg({table[u'include-tests']}): "
1024             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1025             f"series of runs of the listed tests executed against "
1026             f"rls{table[u'compare'][u'title']}.\n"
1027             f"{table[u'compare'][u'title']} "
1028             f"Stdev({table[u'include-tests']}): "
1029             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1030             f"computed from a series of runs of the listed tests executed "
1031             f"against rls{table[u'compare'][u'title']}.\n"
1032             f"Diff({table[u'reference'][u'title']},"
1033             f"{table[u'compare'][u'title']}): "
1034             f"Percentage change calculated for mean values.\n"
1035             u"Stdev(Diff): "
1036             u"Standard deviation of percentage change calculated for mean "
1037             u"values.\n"
1038             u"NT: Not Tested\n"
1039         )
1040     except (AttributeError, KeyError) as err:
1041         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1042         return
1043
1044     # Prepare data for the table:
1045     tbl_dict = dict()
1046     for job, builds in table[u"reference"][u"data"].items():
1047         for build in builds:
1048             for tst_name, tst_data in data[job][str(build)].items():
1049                 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1050                     continue
1051                 tst_name_mod = _tpc_modify_test_name(tst_name)
1052                 if (u"across topologies" in table[u"title"].lower() or
1053                         (u" 3n-" in table[u"title"].lower() and
1054                          u" 2n-" in table[u"title"].lower())):
1055                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1056                 if tbl_dict.get(tst_name_mod, None) is None:
1057                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1058                     if u"across testbeds" in table[u"title"].lower() or \
1059                             u"across topologies" in table[u"title"].lower():
1060                         name = _tpc_modify_displayed_test_name(name)
1061                     tbl_dict[tst_name_mod] = {
1062                         u"name": name,
1063                         u"ref-data": list(),
1064                         u"cmp-data": list()
1065                     }
1066                 _tpc_insert_data(
1067                     target=tbl_dict[tst_name_mod][u"ref-data"],
1068                     src=tst_data,
1069                     include_tests=table[u"include-tests"]
1070                 )
1071
1072     replacement = table[u"reference"].get(u"data-replacement", None)
1073     if replacement:
1074         create_new_list = True
1075         rpl_data = input_data.filter_data(
1076             table, data=replacement, continue_on_error=True)
1077         for job, builds in replacement.items():
1078             for build in builds:
1079                 for tst_name, tst_data in rpl_data[job][str(build)].items():
1080                     if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1081                         continue
1082                     tst_name_mod = _tpc_modify_test_name(tst_name)
1083                     if (u"across topologies" in table[u"title"].lower() or
1084                             (u" 3n-" in table[u"title"].lower() and
1085                              u" 2n-" in table[u"title"].lower())):
1086                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1087                     if tbl_dict.get(tst_name_mod, None) is None:
1088                         name = \
1089                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1090                         if u"across testbeds" in table[u"title"].lower() or \
1091                                 u"across topologies" in table[u"title"].lower():
1092                             name = _tpc_modify_displayed_test_name(name)
1093                         tbl_dict[tst_name_mod] = {
1094                             u"name": name,
1095                             u"ref-data": list(),
1096                             u"cmp-data": list()
1097                         }
1098                     if create_new_list:
1099                         create_new_list = False
1100                         tbl_dict[tst_name_mod][u"ref-data"] = list()
1101
1102                     _tpc_insert_data(
1103                         target=tbl_dict[tst_name_mod][u"ref-data"],
1104                         src=tst_data,
1105                         include_tests=table[u"include-tests"]
1106                     )
1107
1108     for job, builds in table[u"compare"][u"data"].items():
1109         for build in builds:
1110             for tst_name, tst_data in data[job][str(build)].items():
1111                 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1112                     continue
1113                 tst_name_mod = _tpc_modify_test_name(tst_name)
1114                 if (u"across topologies" in table[u"title"].lower() or
1115                         (u" 3n-" in table[u"title"].lower() and
1116                          u" 2n-" in table[u"title"].lower())):
1117                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1118                 if tbl_dict.get(tst_name_mod, None) is None:
1119                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1120                     if u"across testbeds" in table[u"title"].lower() or \
1121                             u"across topologies" in table[u"title"].lower():
1122                         name = _tpc_modify_displayed_test_name(name)
1123                     tbl_dict[tst_name_mod] = {
1124                         u"name": name,
1125                         u"ref-data": list(),
1126                         u"cmp-data": list()
1127                     }
1128                 _tpc_insert_data(
1129                     target=tbl_dict[tst_name_mod][u"cmp-data"],
1130                     src=tst_data,
1131                     include_tests=table[u"include-tests"]
1132                 )
1133
1134     replacement = table[u"compare"].get(u"data-replacement", None)
1135     if replacement:
1136         create_new_list = True
1137         rpl_data = input_data.filter_data(
1138             table, data=replacement, continue_on_error=True)
1139         for job, builds in replacement.items():
1140             for build in builds:
1141                 for tst_name, tst_data in rpl_data[job][str(build)].items():
1142                     if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1143                         continue
1144                     tst_name_mod = _tpc_modify_test_name(tst_name)
1145                     if (u"across topologies" in table[u"title"].lower() or
1146                             (u" 3n-" in table[u"title"].lower() and
1147                              u" 2n-" in table[u"title"].lower())):
1148                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1149                     if tbl_dict.get(tst_name_mod, None) is None:
1150                         name = \
1151                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1152                         if u"across testbeds" in table[u"title"].lower() or \
1153                                 u"across topologies" in table[u"title"].lower():
1154                             name = _tpc_modify_displayed_test_name(name)
1155                         tbl_dict[tst_name_mod] = {
1156                             u"name": name,
1157                             u"ref-data": list(),
1158                             u"cmp-data": list()
1159                         }
1160                     if create_new_list:
1161                         create_new_list = False
1162                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
1163
1164                     _tpc_insert_data(
1165                         target=tbl_dict[tst_name_mod][u"cmp-data"],
1166                         src=tst_data,
1167                         include_tests=table[u"include-tests"]
1168                     )
1169
1170     for item in history:
1171         for job, builds in item[u"data"].items():
1172             for build in builds:
1173                 for tst_name, tst_data in data[job][str(build)].items():
1174                     if item[u"nic"] not in tst_data[u"tags"]:
1175                         continue
1176                     tst_name_mod = _tpc_modify_test_name(tst_name)
1177                     if (u"across topologies" in table[u"title"].lower() or
1178                             (u" 3n-" in table[u"title"].lower() and
1179                              u" 2n-" in table[u"title"].lower())):
1180                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1181                     if tbl_dict.get(tst_name_mod, None) is None:
1182                         continue
1183                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
1184                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1185                     if tbl_dict[tst_name_mod][u"history"].\
1186                             get(item[u"title"], None) is None:
1187                         tbl_dict[tst_name_mod][u"history"][item[
1188                             u"title"]] = list()
1189                     try:
1190                         if table[u"include-tests"] == u"MRR":
1191                             res = (tst_data[u"result"][u"receive-rate"],
1192                                    tst_data[u"result"][u"receive-stdev"])
1193                         elif table[u"include-tests"] == u"PDR":
1194                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1195                         elif table[u"include-tests"] == u"NDR":
1196                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1197                         else:
1198                             continue
1199                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1200                             append(res)
1201                     except (TypeError, KeyError):
1202                         pass
1203
1204     tbl_lst = list()
1205     for tst_name in tbl_dict:
1206         item = [tbl_dict[tst_name][u"name"], ]
1207         if history:
1208             if tbl_dict[tst_name].get(u"history", None) is not None:
1209                 for hist_data in tbl_dict[tst_name][u"history"].values():
1210                     if hist_data:
1211                         if table[u"include-tests"] == u"MRR":
1212                             item.append(round(hist_data[0][0] / 1e6, 1))
1213                             item.append(round(hist_data[0][1] / 1e6, 1))
1214                         else:
1215                             item.append(round(mean(hist_data) / 1e6, 1))
1216                             item.append(round(stdev(hist_data) / 1e6, 1))
1217                     else:
1218                         item.extend([u"NT", u"NT"])
1219             else:
1220                 item.extend([u"NT", u"NT"])
1221         data_r = tbl_dict[tst_name][u"ref-data"]
1222         if data_r:
1223             if table[u"include-tests"] == u"MRR":
1224                 data_r_mean = data_r[0][0]
1225                 data_r_stdev = data_r[0][1]
1226             else:
1227                 data_r_mean = mean(data_r)
1228                 data_r_stdev = stdev(data_r)
1229             item.append(round(data_r_mean / 1e6, 1))
1230             item.append(round(data_r_stdev / 1e6, 1))
1231         else:
1232             data_r_mean = None
1233             data_r_stdev = None
1234             item.extend([u"NT", u"NT"])
1235         data_c = tbl_dict[tst_name][u"cmp-data"]
1236         if data_c:
1237             if table[u"include-tests"] == u"MRR":
1238                 data_c_mean = data_c[0][0]
1239                 data_c_stdev = data_c[0][1]
1240             else:
1241                 data_c_mean = mean(data_c)
1242                 data_c_stdev = stdev(data_c)
1243             item.append(round(data_c_mean / 1e6, 1))
1244             item.append(round(data_c_stdev / 1e6, 1))
1245         else:
1246             data_c_mean = None
1247             data_c_stdev = None
1248             item.extend([u"NT", u"NT"])
1249         if item[-2] == u"NT":
1250             pass
1251         elif item[-4] == u"NT":
1252             item.append(u"New in CSIT-2001")
1253             item.append(u"New in CSIT-2001")
1254         elif data_r_mean is not None and data_c_mean is not None:
1255             delta, d_stdev = relative_change_stdev(
1256                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1257             )
1258             try:
1259                 item.append(round(delta))
1260             except ValueError:
1261                 item.append(delta)
1262             try:
1263                 item.append(round(d_stdev))
1264             except ValueError:
1265                 item.append(d_stdev)
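        # If root cause analysis (RCA) data is available, prepend a column
        # with the RCA reference number, e.g. u"[1]", or u"-" when there is
        # no RCA entry for this test.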
1266         if rca_data:
1267             rca_nr = rca_data.get(item[0], u"-")
1268             item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1269         if (len(item) == len(header)) and (item[-4] != u"NT"):
1270             tbl_lst.append(item)
1271
1272     tbl_lst = _tpc_sort_table(tbl_lst)
1273
1274     # Generate csv tables:
1275     csv_file = f"{table[u'output-file']}.csv"
1276     with open(csv_file, u"wt") as file_handler:
1277         file_handler.write(header_str)
1278         for test in tbl_lst:
1279             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1280
1281     txt_file_name = f"{table[u'output-file']}.txt"
1282     convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
1283
1284     with open(txt_file_name, u'a') as txt_file:
1285         txt_file.write(legend)
1286         if rca_data:
1287             footnote = rca_data.get(u"footnote", u"")
1288             if footnote:
1289                 txt_file.write(u"\n")
1290                 txt_file.write(footnote)
1291         txt_file.write(u":END")
1292
1293     # Generate html table:
1294     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1295
1296
1297 def table_nics_comparison(table, input_data):
1298     """Generate the table(s) with algorithm: table_nics_comparison
1299     specified in the specification file.
1300
1301     :param table: Table to generate.
1302     :param input_data: Data to process.
1303     :type table: pandas.Series
1304     :type input_data: InputData
1305     """
1306
1307     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1308
1309     # Transform the data
1310     logging.info(
1311         f"    Creating the data set for the {table.get(u'type', u'')} "
1312         f"{table.get(u'title', u'')}."
1313     )
1314     data = input_data.filter_data(table, continue_on_error=True)
1315
1316     # Prepare the header of the tables
1317     try:
1318         header = [
1319             u"Test Case",
1320             f"{table[u'reference'][u'title']} "
1321             f"Avg({table[u'include-tests']})",
1322             f"{table[u'reference'][u'title']} "
1323             f"Stdev({table[u'include-tests']})",
1324             f"{table[u'compare'][u'title']} "
1325             f"Avg({table[u'include-tests']})",
1326             f"{table[u'compare'][u'title']} "
1327             f"Stdev({table[u'include-tests']})",
1328             f"Diff({table[u'reference'][u'title']},"
1329             f"{table[u'compare'][u'title']})",
1330             u"Stdev(Diff)"
1331         ]
1332         legend = (
1333             u"\nLegend:\n"
1334             f"{table[u'reference'][u'title']} "
1335             f"Avg({table[u'include-tests']}): "
1336             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1337             f"series of runs of the listed tests executed using "
1338             f"{table[u'reference'][u'title']} NIC.\n"
1339             f"{table[u'reference'][u'title']} "
1340             f"Stdev({table[u'include-tests']}): "
1341             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1342             f"computed from a series of runs of the listed tests executed "
1343             f"using {table[u'reference'][u'title']} NIC.\n"
1344             f"{table[u'compare'][u'title']} "
1345             f"Avg({table[u'include-tests']}): "
1346             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1347             f"series of runs of the listed tests executed using "
1348             f"{table[u'compare'][u'title']} NIC.\n"
1349             f"{table[u'compare'][u'title']} "
1350             f"Stdev({table[u'include-tests']}): "
1351             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1352             f"computed from a series of runs of the listed tests executed "
1353             f"using {table[u'compare'][u'title']} NIC.\n"
1354             f"Diff({table[u'reference'][u'title']},"
1355             f"{table[u'compare'][u'title']}): "
1356             f"Percentage change calculated for mean values.\n"
1357             u"Stdev(Diff): "
1358             u"Standard deviation of percentage change calculated for mean "
1359             u"values.\n"
1360             u":END"
1361         )
1362
1363     except (AttributeError, KeyError) as err:
1364         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1365         return
1366
1367     # Prepare data to the table:
1368     tbl_dict = dict()
1369     for job, builds in table[u"data"].items():
1370         for build in builds:
1371             for tst_name, tst_data in data[job][str(build)].items():
1372                 tst_name_mod = _tpc_modify_test_name(tst_name)
1373                 if tbl_dict.get(tst_name_mod, None) is None:
1374                     name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
1375                     tbl_dict[tst_name_mod] = {
1376                         u"name": name,
1377                         u"ref-data": list(),
1378                         u"cmp-data": list()
1379                     }
1380                 try:
1381                     if table[u"include-tests"] == u"MRR":
1382                         result = (tst_data[u"result"][u"receive-rate"],
1383                                   tst_data[u"result"][u"receive-stdev"])
1384                     elif table[u"include-tests"] == u"PDR":
1385                         result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1386                     elif table[u"include-tests"] == u"NDR":
1387                         result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1388                     else:
1389                         continue
1390
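                    # Sort the result into the reference or the compare
                    # bucket according to the NIC tag of the test.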
1391                     if result and \
1392                             table[u"reference"][u"nic"] in tst_data[u"tags"]:
1393                         tbl_dict[tst_name_mod][u"ref-data"].append(result)
1394                     elif result and \
1395                             table[u"compare"][u"nic"] in tst_data[u"tags"]:
1396                         tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1397                 except (TypeError, KeyError) as err:
1398                     logging.debug(f"No data for {tst_name}\n{repr(err)}")
1399                     # No data in output.xml for this test
1400
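    # Build the table rows: test name, reference NIC mean and stdev, compare
    # NIC mean and stdev [Mpps], relative change of the means and its stdev
    # [%]. Only tests with data for both NICs produce a complete row.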
1401     tbl_lst = list()
1402     for tst_name in tbl_dict:
1403         item = [tbl_dict[tst_name][u"name"], ]
1404         data_r = tbl_dict[tst_name][u"ref-data"]
1405         if data_r:
1406             if table[u"include-tests"] == u"MRR":
1407                 data_r_mean = data_r[0][0]
1408                 data_r_stdev = data_r[0][1]
1409             else:
1410                 data_r_mean = mean(data_r)
1411                 data_r_stdev = stdev(data_r)
1412             item.append(round(data_r_mean / 1e6, 1))
1413             item.append(round(data_r_stdev / 1e6, 1))
1414         else:
1415             data_r_mean = None
1416             data_r_stdev = None
1417             item.extend([None, None])
1418         data_c = tbl_dict[tst_name][u"cmp-data"]
1419         if data_c:
1420             if table[u"include-tests"] == u"MRR":
1421                 data_c_mean = data_c[0][0]
1422                 data_c_stdev = data_c[0][1]
1423             else:
1424                 data_c_mean = mean(data_c)
1425                 data_c_stdev = stdev(data_c)
1426             item.append(round(data_c_mean / 1e6, 1))
1427             item.append(round(data_c_stdev / 1e6, 1))
1428         else:
1429             data_c_mean = None
1430             data_c_stdev = None
1431             item.extend([None, None])
1432         if data_r_mean is not None and data_c_mean is not None:
1433             delta, d_stdev = relative_change_stdev(
1434                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1435             )
1436             try:
1437                 item.append(round(delta))
1438             except ValueError:
1439                 item.append(delta)
1440             try:
1441                 item.append(round(d_stdev))
1442             except ValueError:
1443                 item.append(d_stdev)
1444             tbl_lst.append(item)
1445
1446     # Sort the table according to the relative change
1447     tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)
1448
1449     # Generate csv tables:
1450     with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1451         file_handler.write(u";".join(header) + u"\n")
1452         for test in tbl_lst:
1453             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1454
1455     convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1456                               f"{table[u'output-file']}.txt",
1457                               delimiter=u";")
1458
1459     with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1460         txt_file.write(legend)
1461
1462     # Generate html table:
1463     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1464
1465
1466 def table_soak_vs_ndr(table, input_data):
1467     """Generate the table(s) with algorithm: table_soak_vs_ndr
1468     specified in the specification file.
1469
1470     :param table: Table to generate.
1471     :param input_data: Data to process.
1472     :type table: pandas.Series
1473     :type input_data: InputData
1474     """
1475
1476     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1477
1478     # Transform the data
1479     logging.info(
1480         f"    Creating the data set for the {table.get(u'type', u'')} "
1481         f"{table.get(u'title', u'')}."
1482     )
1483     data = input_data.filter_data(table, continue_on_error=True)
1484
1485     # Prepare the header of the table
1486     try:
1487         header = [
1488             u"Test Case",
1489             f"Avg({table[u'reference'][u'title']})",
1490             f"Stdev({table[u'reference'][u'title']})",
1491             f"Avg({table[u'compare'][u'title']})",
1492             f"Stdev({table[u'compare'][u'title']})",
1493             u"Diff",
1494             u"Stdev(Diff)"
1495         ]
1496         header_str = u";".join(header) + u"\n"
1497         legend = (
1498             u"\nLegend:\n"
1499             f"Avg({table[u'reference'][u'title']}): "
1500             f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
1501             f"from a series of runs of the listed tests.\n"
1502             f"Stdev({table[u'reference'][u'title']}): "
1503             f"Standard deviation value of {table[u'reference'][u'title']} "
1504             f"[Mpps] computed from a series of runs of the listed tests.\n"
1505             f"Avg({table[u'compare'][u'title']}): "
1506             f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
1507             f"a series of runs of the listed tests.\n"
1508             f"Stdev({table[u'compare'][u'title']}): "
1509             f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
1510             f"computed from a series of runs of the listed tests.\n"
1511             f"Diff({table[u'reference'][u'title']},"
1512             f"{table[u'compare'][u'title']}): "
1513             f"Percentage change calculated for mean values.\n"
1514             u"Stdev(Diff): "
1515             u"Standard deviation of percentage change calculated for mean "
1516             u"values.\n"
1517             u":END"
1518         )
1519     except (AttributeError, KeyError) as err:
1520         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1521         return
1522
1523     # Create a list of available SOAK test results:
1524     tbl_dict = dict()
1525     for job, builds in table[u"compare"][u"data"].items():
1526         for build in builds:
1527             for tst_name, tst_data in data[job][str(build)].items():
1528                 if tst_data[u"type"] == u"SOAK":
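                    # Strip the u"-soak" suffix so the soak test can be
                    # paired with its NDRPDR counterpart below.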
1529                     tst_name_mod = tst_name.replace(u"-soak", u"")
1530                     if tbl_dict.get(tst_name_mod, None) is None:
1531                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
1532                         nic = groups.group(0) if groups else u""
1533                         name = (
1534                             f"{nic}-"
1535                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1536                         )
1537                         tbl_dict[tst_name_mod] = {
1538                             u"name": name,
1539                             u"ref-data": list(),
1540                             u"cmp-data": list()
1541                         }
1542                     try:
1543                         tbl_dict[tst_name_mod][u"cmp-data"].append(
1544                             tst_data[u"throughput"][u"LOWER"])
1545                     except (KeyError, TypeError):
1546                         pass
1547     tests_lst = tbl_dict.keys()
1548
1549     # Add corresponding NDR test results:
1550     for job, builds in table[u"reference"][u"data"].items():
1551         for build in builds:
1552             for tst_name, tst_data in data[job][str(build)].items():
1553                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1554                     replace(u"-mrr", u"")
1555                 if tst_name_mod not in tests_lst:
1556                     continue
1557                 try:
1558                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1559                         continue
1560                     if table[u"include-tests"] == u"MRR":
1561                         result = (tst_data[u"result"][u"receive-rate"],
1562                                   tst_data[u"result"][u"receive-stdev"])
1563                     elif table[u"include-tests"] == u"PDR":
1564                         result = \
1565                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
1566                     elif table[u"include-tests"] == u"NDR":
1567                         result = \
1568                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
1569                     else:
1570                         result = None
1571                     if result is not None:
1572                         tbl_dict[tst_name_mod][u"ref-data"].append(
1573                             result)
1574                 except (KeyError, TypeError):
1575                     continue
1576
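    # Build the table rows: test name, NDR (reference) mean and stdev, soak
    # (compare) mean and stdev [Mpps], relative change of the means and its
    # stdev [%]. Tests missing either of the two results are skipped.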
1577     tbl_lst = list()
1578     for tst_name in tbl_dict:
1579         item = [tbl_dict[tst_name][u"name"], ]
1580         data_r = tbl_dict[tst_name][u"ref-data"]
1581         if data_r:
1582             if table[u"include-tests"] == u"MRR":
1583                 data_r_mean = data_r[0][0]
1584                 data_r_stdev = data_r[0][1]
1585             else:
1586                 data_r_mean = mean(data_r)
1587                 data_r_stdev = stdev(data_r)
1588             item.append(round(data_r_mean / 1e6, 1))
1589             item.append(round(data_r_stdev / 1e6, 1))
1590         else:
1591             data_r_mean = None
1592             data_r_stdev = None
1593             item.extend([None, None])
1594         data_c = tbl_dict[tst_name][u"cmp-data"]
1595         if data_c:
1596             if table[u"include-tests"] == u"MRR":
1597                 data_c_mean = data_c[0][0]
1598                 data_c_stdev = data_c[0][1]
1599             else:
1600                 data_c_mean = mean(data_c)
1601                 data_c_stdev = stdev(data_c)
1602             item.append(round(data_c_mean / 1e6, 1))
1603             item.append(round(data_c_stdev / 1e6, 1))
1604         else:
1605             data_c_mean = None
1606             data_c_stdev = None
1607             item.extend([None, None])
1608         if data_r_mean is not None and data_c_mean is not None:
1609             delta, d_stdev = relative_change_stdev(
1610                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1611             try:
1612                 item.append(round(delta))
1613             except ValueError:
1614                 item.append(delta)
1615             try:
1616                 item.append(round(d_stdev))
1617             except ValueError:
1618                 item.append(d_stdev)
1619             tbl_lst.append(item)
1620
1621     # Sort the table according to the relative change
1622     tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)
1623
1624     # Generate csv tables:
1625     csv_file = f"{table[u'output-file']}.csv"
1626     with open(csv_file, u"wt") as file_handler:
1627         file_handler.write(header_str)
1628         for test in tbl_lst:
1629             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1630
1631     convert_csv_to_pretty_txt(
1632         csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
1633     )
1634     with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1635         txt_file.write(legend)
1636
1637     # Generate html table:
1638     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1639
1640
1641 def table_perf_trending_dash(table, input_data):
1642     """Generate the table(s) with algorithm:
1643     table_perf_trending_dash
1644     specified in the specification file.
1645
1646     :param table: Table to generate.
1647     :param input_data: Data to process.
1648     :type table: pandas.Series
1649     :type input_data: InputData
1650     """
1651
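    # Illustrative sketch of a specification entry driving this algorithm.
    # The field names are the ones read by this function; the values and the
    # exact layout are hypothetical, the authoritative schema lives in the
    # specification YAML files:
    #
    #   - type: "table"
    #     title: "Performance trending dashboard"
    #     algorithm: "table_perf_trending_dash"
    #     output-file: "output/dashboard"
    #     output-file-ext: ".csv"
    #     window: 14
    #     long-trend-window: 180
    #     ignore-list: []
    #     data: ...  # resolved to a {job: [builds]} mapping before this
    #                # function is called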
1652     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1653
1654     # Transform the data
1655     logging.info(
1656         f"    Creating the data set for the {table.get(u'type', u'')} "
1657         f"{table.get(u'title', u'')}."
1658     )
1659     data = input_data.filter_data(table, continue_on_error=True)
1660
1661     # Prepare the header of the tables
1662     header = [
1663         u"Test Case",
1664         u"Trend [Mpps]",
1665         u"Short-Term Change [%]",
1666         u"Long-Term Change [%]",
1667         u"Regressions [#]",
1668         u"Progressions [#]"
1669     ]
1670     header_str = u",".join(header) + u"\n"
1671
1672     # Prepare data to the table:
1673     tbl_dict = dict()
1674     for job, builds in table[u"data"].items():
1675         for build in builds:
1676             for tst_name, tst_data in data[job][str(build)].items():
1677                 if tst_name.lower() in table.get(u"ignore-list", list()):
1678                     continue
1679                 if tbl_dict.get(tst_name, None) is None:
1680                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1681                     if not groups:
1682                         continue
1683                     nic = groups.group(0)
1684                     tbl_dict[tst_name] = {
1685                         u"name": f"{nic}-{tst_data[u'name']}",
1686                         u"data": OrderedDict()
1687                     }
1688                 try:
1689                     tbl_dict[tst_name][u"data"][str(build)] = \
1690                         tst_data[u"result"][u"receive-rate"]
1691                 except (TypeError, KeyError):
1692                     pass  # No data in output.xml for this test
1693
1694     tbl_lst = list()
1695     for tst_name in tbl_dict:
1696         data_t = tbl_dict[tst_name][u"data"]
1697         if len(data_t) < 2:
1698             continue
1699
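        # classify_anomalies() returns a per-sample classification (e.g.
        # u"regression", u"progression") together with the trend averages.
        # The short-term change compares the last trend value with the value
        # one window ago; the long-term change compares it with the maximum
        # trend value in the long window (excluding the short window).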
1700         classification_lst, avgs = classify_anomalies(data_t)
1701
1702         win_size = min(len(data_t), table[u"window"])
1703         long_win_size = min(len(data_t), table[u"long-trend-window"])
1704
1705         try:
1706             max_long_avg = max(
1707                 [x for x in avgs[-long_win_size:-win_size]
1708                  if not isnan(x)])
1709         except ValueError:
1710             max_long_avg = nan
1711         last_avg = avgs[-1]
1712         avg_week_ago = avgs[max(-win_size, -len(avgs))]
1713
1714         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1715             rel_change_last = nan
1716         else:
1717             rel_change_last = round(
1718                 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1719
1720         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1721             rel_change_long = nan
1722         else:
1723             rel_change_long = round(
1724                 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1725
1726         if classification_lst:
1727             if isnan(rel_change_last) and isnan(rel_change_long):
1728                 continue
1729             if isnan(last_avg) or isnan(rel_change_last) or \
1730                     isnan(rel_change_long):
1731                 continue
1732             tbl_lst.append(
1733                 [tbl_dict[tst_name][u"name"],
1734                  round(last_avg / 1000000, 2),
1735                  rel_change_last,
1736                  rel_change_long,
1737                  classification_lst[-win_size:].count(u"regression"),
1738                  classification_lst[-win_size:].count(u"progression")])
1739
1740     tbl_lst.sort(key=lambda rel: rel[0])
1741
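    # Final ordering: number of regressions (descending), then number of
    # progressions (descending), then short-term change (ascending).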
1742     tbl_sorted = list()
1743     for nrr in range(table[u"window"], -1, -1):
1744         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1745         for nrp in range(table[u"window"], -1, -1):
1746             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1747             tbl_out.sort(key=lambda rel: rel[2])
1748             tbl_sorted.extend(tbl_out)
1749
1750     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1751
1752     logging.info(f"    Writing file: {file_name}")
1753     with open(file_name, u"wt") as file_handler:
1754         file_handler.write(header_str)
1755         for test in tbl_sorted:
1756             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1757
1758     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1759     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1760
1761
1762 def _generate_url(testbed, test_name):
1763     """Generate URL to a trending plot from the name of the test case.
1764
1765     :param testbed: The testbed used for testing.
1766     :param test_name: The name of the test case.
1767     :type testbed: str
1768     :type test_name: str
1769     :returns: The URL to the plot with the trending data for the given test
1770         case.
1771     :rtype: str
1772     """
1773
1774     if u"x520" in test_name:
1775         nic = u"x520"
1776     elif u"x710" in test_name:
1777         nic = u"x710"
1778     elif u"xl710" in test_name:
1779         nic = u"xl710"
1780     elif u"xxv710" in test_name:
1781         nic = u"xxv710"
1782     elif u"vic1227" in test_name:
1783         nic = u"vic1227"
1784     elif u"vic1385" in test_name:
1785         nic = u"vic1385"
1786     elif u"x553" in test_name:
1787         nic = u"x553"
1788     else:
1789         nic = u""
1790
1791     if u"64b" in test_name:
1792         frame_size = u"64b"
1793     elif u"78b" in test_name:
1794         frame_size = u"78b"
1795     elif u"imix" in test_name:
1796         frame_size = u"imix"
1797     elif u"9000b" in test_name:
1798         frame_size = u"9000b"
1799     elif u"1518b" in test_name:
1800         frame_size = u"1518b"
1801     elif u"114b" in test_name:
1802         frame_size = u"114b"
1803     else:
1804         frame_size = u""
1805
1806     if u"1t1c" in test_name or \
1807         (u"-1c-" in test_name and
1808          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1809         cores = u"1t1c"
1810     elif u"2t2c" in test_name or \
1811          (u"-2c-" in test_name and
1812           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1813         cores = u"2t2c"
1814     elif u"4t4c" in test_name or \
1815          (u"-4c-" in test_name and
1816           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1817         cores = u"4t4c"
1818     elif u"2t1c" in test_name or \
1819          (u"-1c-" in test_name and
1820           testbed in (u"2n-skx", u"3n-skx")):
1821         cores = u"2t1c"
1822     elif u"4t2c" in test_name:
1823         cores = u"4t2c"
1824     elif u"8t4c" in test_name:
1825         cores = u"8t4c"
1826     else:
1827         cores = u""
1828
1829     if u"testpmd" in test_name:
1830         driver = u"testpmd"
1831     elif u"l3fwd" in test_name:
1832         driver = u"l3fwd"
1833     elif u"avf" in test_name:
1834         driver = u"avf"
1835     elif u"dnv" in testbed or u"tsh" in testbed:
1836         driver = u"ixgbe"
1837     else:
1838         driver = u"dpdk"
1839
1840     if u"acl" in test_name or \
1841             u"macip" in test_name or \
1842             u"nat" in test_name or \
1843             u"policer" in test_name or \
1844             u"cop" in test_name:
1845         bsf = u"features"
1846     elif u"scale" in test_name:
1847         bsf = u"scale"
1848     elif u"base" in test_name:
1849         bsf = u"base"
1850     else:
1851         bsf = u"base"
1852
1853     if u"114b" in test_name and u"vhost" in test_name:
1854         domain = u"vts"
1855     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1856         domain = u"dpdk"
1857     elif u"memif" in test_name:
1858         domain = u"container_memif"
1859     elif u"srv6" in test_name:
1860         domain = u"srv6"
1861     elif u"vhost" in test_name:
1862         domain = u"vhost"
1863         if u"vppl2xc" in test_name:
1864             driver += u"-vpp"
1865         else:
1866             driver += u"-testpmd"
1867         if u"lbvpplacp" in test_name:
1868             bsf += u"-link-bonding"
1869     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1870         domain = u"nf_service_density_vnfc"
1871     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1872         domain = u"nf_service_density_cnfc"
1873     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1874         domain = u"nf_service_density_cnfp"
1875     elif u"ipsec" in test_name:
1876         domain = u"ipsec"
1877         if u"sw" in test_name:
1878             bsf += u"-sw"
1879         elif u"hw" in test_name:
1880             bsf += u"-hw"
1881     elif u"ethip4vxlan" in test_name:
1882         domain = u"ip4_tunnels"
1883     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1884         domain = u"ip4"
1885     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1886         domain = u"ip6"
1887     elif u"l2xcbase" in test_name or \
1888             u"l2xcscale" in test_name or \
1889             u"l2bdbasemaclrn" in test_name or \
1890             u"l2bdscale" in test_name or \
1891             u"l2patch" in test_name:
1892         domain = u"l2"
1893     else:
1894         domain = u""
1895
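    # The resulting link has the form
    # "<domain>-<testbed>-<nic>.html#<frame_size>-<cores>-<bsf>-<driver>",
    # e.g. (illustrative) _generate_url(u"2n-skx",
    # u"x710-64b-2t1c-l2bdbasemaclrn-mrr") yields
    # u"l2-2n-skx-x710.html#64b-2t1c-base-dpdk".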
1896     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1897     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1898
1899     return file_name + anchor_name
1900
1901
1902 def table_perf_trending_dash_html(table, input_data):
1903     """Generate the table(s) with algorithm:
1904     table_perf_trending_dash_html specified in the specification
1905     file.
1906
1907     :param table: Table to generate.
1908     :param input_data: Data to process.
1909     :type table: dict
1910     :type input_data: InputData
1911     """
1912
1913     _ = input_data
1914
1915     if not table.get(u"testbed", None):
1916         logging.error(
1917             f"The testbed is not defined for the table "
1918             f"{table.get(u'title', u'')}."
1919         )
1920         return
1921
1922     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1923
1924     try:
1925         with open(table[u"input-file"], u'rt') as csv_file:
1926             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1927     except KeyError:
1928         logging.warning(u"The input file is not defined.")
1929         return
1930     except csv.Error as err:
1931         logging.warning(
1932             f"Unable to process the file {table[u'input-file']}.\n"
1933             f"{repr(err)}"
1934         )
1935         return
1936
1937     # Table:
1938     dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1939
1940     # Table header:
1941     trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1942     for idx, item in enumerate(csv_lst[0]):
1943         alignment = u"left" if idx == 0 else u"center"
1944         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
1945         thead.text = item
1946
1947     # Rows:
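    # Row background: red shades for rows with regressions, green shades for
    # rows with progressions, blue shades otherwise; the two shades of each
    # colour alternate to create the zebra striping.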
1948     colors = {
1949         u"regression": (
1950             u"#ffcccc",
1951             u"#ff9999"
1952         ),
1953         u"progression": (
1954             u"#c6ecc6",
1955             u"#9fdf9f"
1956         ),
1957         u"normal": (
1958             u"#e9f1fb",
1959             u"#d4e4f7"
1960         )
1961     }
1962     for r_idx, row in enumerate(csv_lst[1:]):
1963         if int(row[4]):
1964             color = u"regression"
1965         elif int(row[5]):
1966             color = u"progression"
1967         else:
1968             color = u"normal"
1969         trow = ET.SubElement(
1970             dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1971         )
1972
1973         # Columns:
1974         for c_idx, item in enumerate(row):
1975             tdata = ET.SubElement(
1976                 trow,
1977                 u"td",
1978                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
1979             )
1980             # Name:
1981             if c_idx == 0:
1982                 ref = ET.SubElement(
1983                     tdata,
1984                     u"a",
1985                     attrib=dict(
1986                         href=f"../trending/"
1987                              f"{_generate_url(table.get(u'testbed', ''), item)}"
1988                     )
1989                 )
1990                 ref.text = item
1991             else:
1992                 tdata.text = item
1993     try:
1994         with open(table[u"output-file"], u'w') as html_file:
1995             logging.info(f"    Writing file: {table[u'output-file']}")
1996             html_file.write(u".. raw:: html\n\n\t")
1997             html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1998             html_file.write(u"\n\t<p><br><br></p>\n")
1999     except KeyError:
2000         logging.warning(u"The output file is not defined.")
2001         return
2002
2003
2004 def table_last_failed_tests(table, input_data):
2005     """Generate the table(s) with algorithm: table_last_failed_tests
2006     specified in the specification file.
2007
2008     :param table: Table to generate.
2009     :param input_data: Data to process.
2010     :type table: pandas.Series
2011     :type input_data: InputData
2012     """
2013
2014     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2015
2016     # Transform the data
2017     logging.info(
2018         f"    Creating the data set for the {table.get(u'type', u'')} "
2019         f"{table.get(u'title', u'')}."
2020     )
2021
2022     data = input_data.filter_data(table, continue_on_error=True)
2023
2024     if data is None or data.empty:
2025         logging.warning(
2026             f"    No data for the {table.get(u'type', u'')} "
2027             f"{table.get(u'title', u'')}."
2028         )
2029         return
2030
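    # The output is a flat list written one item per line: build number,
    # version string, number of passed tests, number of failed tests,
    # followed by the names of the failed tests.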
2031     tbl_list = list()
2032     for job, builds in table[u"data"].items():
2033         for build in builds:
2034             build = str(build)
2035             try:
2036                 version = input_data.metadata(job, build).get(u"version", u"")
2037             except KeyError:
2038                 logging.error(f"Data for {job}: {build} is not present.")
2039                 return
2040             tbl_list.append(build)
2041             tbl_list.append(version)
2042             failed_tests = list()
2043             passed = 0
2044             failed = 0
2045             for tst_data in data[job][build].values:
2046                 if tst_data[u"status"] != u"FAIL":
2047                     passed += 1
2048                     continue
2049                 failed += 1
2050                 groups = re.search(REGEX_NIC, tst_data[u"parent"])
2051                 if not groups:
2052                     continue
2053                 nic = groups.group(0)
2054                 failed_tests.append(f"{nic}-{tst_data[u'name']}")
2055             tbl_list.append(str(passed))
2056             tbl_list.append(str(failed))
2057             tbl_list.extend(failed_tests)
2058
2059     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2060     logging.info(f"    Writing file: {file_name}")
2061     with open(file_name, u"wt") as file_handler:
2062         for test in tbl_list:
2063             file_handler.write(test + u'\n')
2064
2065
2066 def table_failed_tests(table, input_data):
2067     """Generate the table(s) with algorithm: table_failed_tests
2068     specified in the specification file.
2069
2070     :param table: Table to generate.
2071     :param input_data: Data to process.
2072     :type table: pandas.Series
2073     :type input_data: InputData
2074     """
2075
2076     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2077
2078     # Transform the data
2079     logging.info(
2080         f"    Creating the data set for the {table.get(u'type', u'')} "
2081         f"{table.get(u'title', u'')}."
2082     )
2083     data = input_data.filter_data(table, continue_on_error=True)
2084
2085     # Prepare the header of the tables
2086     header = [
2087         u"Test Case",
2088         u"Failures [#]",
2089         u"Last Failure [Time]",
2090         u"Last Failure [VPP-Build-Id]",
2091         u"Last Failure [CSIT-Job-Build-Id]"
2092     ]
2093
2094     # Generate the data for the table according to the model in the table
2095     # specification
2096
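    # Only failures from runs generated within the last "window" days
    # (7 by default) are counted.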
2097     now = dt.utcnow()
2098     timeperiod = timedelta(int(table.get(u"window", 7)))
2099
2100     tbl_dict = dict()
2101     for job, builds in table[u"data"].items():
2102         for build in builds:
2103             build = str(build)
2104             for tst_name, tst_data in data[job][build].items():
2105                 if tst_name.lower() in table.get(u"ignore-list", list()):
2106                     continue
2107                 if tbl_dict.get(tst_name, None) is None:
2108                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
2109                     if not groups:
2110                         continue
2111                     nic = groups.group(0)
2112                     tbl_dict[tst_name] = {
2113                         u"name": f"{nic}-{tst_data[u'name']}",
2114                         u"data": OrderedDict()
2115                     }
2116                 try:
2117                     generated = input_data.metadata(job, build).\
2118                         get(u"generated", u"")
2119                     if not generated:
2120                         continue
2121                     then = dt.strptime(generated, u"%Y%m%d %H:%M")
2122                     if (now - then) <= timeperiod:
2123                         tbl_dict[tst_name][u"data"][build] = (
2124                             tst_data[u"status"],
2125                             generated,
2126                             input_data.metadata(job, build).get(u"version",
2127                                                                 u""),
2128                             build
2129                         )
2130                 except (TypeError, KeyError) as err:
2131                     logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
2132
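    # Aggregate per test: count the failures within the window and remember
    # the date, the VPP build and the CSIT build of the most recent failure.
    # Rows are sorted by the number of failures (highest first) and, within
    # the same count, by the date of the last failure (newest first).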
2133     max_fails = 0
2134     tbl_lst = list()
2135     for tst_data in tbl_dict.values():
2136         fails_nr = 0
2137         fails_last_date = u""
2138         fails_last_vpp = u""
2139         fails_last_csit = u""
2140         for val in tst_data[u"data"].values():
2141             if val[0] == u"FAIL":
2142                 fails_nr += 1
2143                 fails_last_date = val[1]
2144                 fails_last_vpp = val[2]
2145                 fails_last_csit = val[3]
2146         if fails_nr:
2147             max_fails = fails_nr if fails_nr > max_fails else max_fails
2148             tbl_lst.append(
2149                 [
2150                     tst_data[u"name"],
2151                     fails_nr,
2152                     fails_last_date,
2153                     fails_last_vpp,
2154                     f"mrr-daily-build-{fails_last_csit}"
2155                 ]
2156             )
2157
2158     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
2159     tbl_sorted = list()
2160     for nrf in range(max_fails, -1, -1):
2161         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
2162         tbl_sorted.extend(tbl_fails)
2163
2164     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2165     logging.info(f"    Writing file: {file_name}")
2166     with open(file_name, u"wt") as file_handler:
2167         file_handler.write(u",".join(header) + u"\n")
2168         for test in tbl_sorted:
2169             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
2170
2171     logging.info(f"    Writing file: {table[u'output-file']}.txt")
2172     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
2173
2174
2175 def table_failed_tests_html(table, input_data):
2176     """Generate the table(s) with algorithm: table_failed_tests_html
2177     specified in the specification file.
2178
2179     :param table: Table to generate.
2180     :param input_data: Data to process.
2181     :type table: pandas.Series
2182     :type input_data: InputData
2183     """
2184
2185     _ = input_data
2186
2187     if not table.get(u"testbed", None):
2188         logging.error(
2189             f"The testbed is not defined for the table "
2190             f"{table.get(u'title', u'')}."
2191         )
2192         return
2193
2194     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2195
2196     try:
2197         with open(table[u"input-file"], u'rt') as csv_file:
2198             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2199     except KeyError:
2200         logging.warning(u"The input file is not defined.")
2201         return
2202     except csv.Error as err:
2203         logging.warning(
2204             f"Unable to process the file {table[u'input-file']}.\n"
2205             f"{repr(err)}"
2206         )
2207         return
2208
2209     # Table:
2210     failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2211
2212     # Table header:
2213     trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2214     for idx, item in enumerate(csv_lst[0]):
2215         alignment = u"left" if idx == 0 else u"center"
2216         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
2217         thead.text = item
2218
2219     # Rows:
2220     colors = (u"#e9f1fb", u"#d4e4f7")
2221     for r_idx, row in enumerate(csv_lst[1:]):
2222         background = colors[r_idx % 2]
2223         trow = ET.SubElement(
2224             failed_tests, u"tr", attrib=dict(bgcolor=background)
2225         )
2226
2227         # Columns:
2228         for c_idx, item in enumerate(row):
2229             tdata = ET.SubElement(
2230                 trow,
2231                 u"td",
2232                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
2233             )
2234             # Name:
2235             if c_idx == 0:
2236                 ref = ET.SubElement(
2237                     tdata,
2238                     u"a",
2239                     attrib=dict(
2240                         href=f"../trending/"
2241                              f"{_generate_url(table.get(u'testbed', ''), item)}"
2242                     )
2243                 )
2244                 ref.text = item
2245             else:
2246                 tdata.text = item
2247     try:
2248         with open(table[u"output-file"], u'w') as html_file:
2249             logging.info(f"    Writing file: {table[u'output-file']}")
2250             html_file.write(u".. raw:: html\n\n\t")
2251             html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2252             html_file.write(u"\n\t<p><br><br></p>\n")
2253     except KeyError:
2254         logging.warning(u"The output file is not defined.")
2255         return