1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32 from yaml import load, FullLoader, YAMLError
33
34 from pal_utils import mean, stdev, classify_anomalies, \
35     convert_csv_to_pretty_txt, relative_change_stdev
36
37
38 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
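# Matches the NIC token embedded in test/suite names (e.g. a hypothetical
# u"10ge2p1x710"); used elsewhere in this module to strip or extract it.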
39
40
41 def generate_tables(spec, data):
42     """Generate all tables specified in the specification file.
43
44     :param spec: Specification read from the specification file.
45     :param data: Data to process.
46     :type spec: Specification
47     :type data: InputData
48     """
49
50     generator = {
51         u"table_merged_details": table_merged_details,
52         u"table_perf_comparison": table_perf_comparison,
53         u"table_perf_comparison_nic": table_perf_comparison_nic,
54         u"table_nics_comparison": table_nics_comparison,
55         u"table_soak_vs_ndr": table_soak_vs_ndr,
56         u"table_perf_trending_dash": table_perf_trending_dash,
57         u"table_perf_trending_dash_html": table_perf_trending_dash_html,
58         u"table_last_failed_tests": table_last_failed_tests,
59         u"table_failed_tests": table_failed_tests,
60         u"table_failed_tests_html": table_failed_tests_html,
61         u"table_oper_data_html": table_oper_data_html
62     }
63
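    # Each table specification selects one of the generators above via its
    # u"algorithm" field; unknown algorithms are logged and skipped.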
64     logging.info(u"Generating the tables ...")
65     for table in spec.tables:
66         try:
67             generator[table[u"algorithm"]](table, data)
68         except (KeyError, NameError) as err:
69             logging.error(
70                 f"The algorithm {table[u'algorithm']} is probably not "
71                 f"defined: {repr(err)}"
72             )
73     logging.info(u"Done.")
74
75
76 def table_oper_data_html(table, input_data):
77     """Generate the table(s) with algorithm: table_oper_data_html
78     specified in the specification file.
79
80     :param table: Table to generate.
81     :param input_data: Data to process.
82     :type table: pandas.Series
83     :type input_data: InputData
84     """
85
86     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
87     # Transform the data
88     logging.info(
89         f"    Creating the data set for the {table.get(u'type', u'')} "
90         f"{table.get(u'title', u'')}."
91     )
92     data = input_data.filter_data(
93         table,
94         params=[u"name", u"parent", u"show-run", u"type"],
95         continue_on_error=True
96     )
97     if data.empty:
98         return
99     data = input_data.merge_data(data)
100
101     sort_tests = table.get(u"sort", None)
102     if sort_tests:
103         args = dict(
104             inplace=True,
105             ascending=(sort_tests == u"ascending")
106         )
107         data.sort_index(**args)
108
109     suites = input_data.filter_data(
110         table,
111         continue_on_error=True,
112         data_set=u"suites"
113     )
114     if suites.empty:
115         return
116     suites = input_data.merge_data(suites)
117
118     def _generate_html_table(tst_data):
119         """Generate an HTML table with operational data for the given test.
120
121         :param tst_data: Test data to be used to generate the table.
122         :type tst_data: pandas.Series
123         :returns: HTML table with operational data.
124         :rtype: str
125         """
126
127         colors = {
128             u"header": u"#7eade7",
129             u"empty": u"#ffffff",
130             u"body": (u"#e9f1fb", u"#d4e4f7")
131         }
132
133         tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
134
135         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
136         thead = ET.SubElement(
137             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
138         )
139         thead.text = tst_data[u"name"]
140
141         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
142         thead = ET.SubElement(
143             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
144         )
145         thead.text = u"\t"
146
147         if tst_data.get(u"show-run", u"No Data") == u"No Data":
148             trow = ET.SubElement(
149                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
150             )
151             tcol = ET.SubElement(
152                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
153             )
154             tcol.text = u"No Data"
155
156             trow = ET.SubElement(
157                 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
158             )
159             thead = ET.SubElement(
160                 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
161             )
162             font = ET.SubElement(
163                 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
164             )
165             font.text = u"."
166             return str(ET.tostring(tbl, encoding=u"unicode"))
167
168         tbl_hdr = (
169             u"Name",
170             u"Nr of Vectors",
171             u"Nr of Packets",
172             u"Suspends",
173             u"Cycles per Packet",
174             u"Average Vector Size"
175         )
176
177         for dut_data in tst_data[u"show-run"].values():
178             trow = ET.SubElement(
179                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
180             )
181             tcol = ET.SubElement(
182                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
183             )
184             if dut_data.get(u"threads", None) is None:
185                 tcol.text = u"No Data"
186                 continue
187
188             bold = ET.SubElement(tcol, u"b")
189             bold.text = (
190                 f"Host IP: {dut_data.get(u'host', '')}, "
191                 f"Socket: {dut_data.get(u'socket', '')}"
192             )
193             trow = ET.SubElement(
194                 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
195             )
196             thead = ET.SubElement(
197                 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
198             )
199             thead.text = u"\t"
200
201             for thread_nr, thread in dut_data[u"threads"].items():
202                 trow = ET.SubElement(
203                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
204                 )
205                 tcol = ET.SubElement(
206                     trow, u"td", attrib=dict(align=u"left", colspan=u"6")
207                 )
208                 bold = ET.SubElement(tcol, u"b")
209                 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
210                 trow = ET.SubElement(
211                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
212                 )
213                 for idx, col in enumerate(tbl_hdr):
214                     tcol = ET.SubElement(
215                         trow, u"td",
216                         attrib=dict(align=u"right" if idx else u"left")
217                     )
218                     font = ET.SubElement(
219                         tcol, u"font", attrib=dict(size=u"2")
220                     )
221                     bold = ET.SubElement(font, u"b")
222                     bold.text = col
223                 for row_nr, row in enumerate(thread):
224                     trow = ET.SubElement(
225                         tbl, u"tr",
226                         attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
227                     )
228                     for idx, col in enumerate(row):
229                         tcol = ET.SubElement(
230                             trow, u"td",
231                             attrib=dict(align=u"right" if idx else u"left")
232                         )
233                         font = ET.SubElement(
234                             tcol, u"font", attrib=dict(size=u"2")
235                         )
236                         if isinstance(col, float):
237                             font.text = f"{col:.2f}"
238                         else:
239                             font.text = str(col)
240                 trow = ET.SubElement(
241                     tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
242                 )
243                 thead = ET.SubElement(
244                     trow, u"th", attrib=dict(align=u"left", colspan=u"6")
245                 )
246                 thead.text = u"\t"
247
248         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
249         thead = ET.SubElement(
250             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
251         )
252         font = ET.SubElement(
253             thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
254         )
255         font.text = u"."
256
257         return str(ET.tostring(tbl, encoding=u"unicode"))
258
259     for suite in suites.values:
260         html_table = str()
261         for test_data in data.values:
262             if test_data[u"parent"] not in suite[u"name"]:
263                 continue
264             html_table += _generate_html_table(test_data)
265         if not html_table:
266             continue
267         try:
268             file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
269             with open(f"{file_name}", u'w') as html_file:
270                 logging.info(f"    Writing file: {file_name}")
271                 html_file.write(u".. raw:: html\n\n\t")
272                 html_file.write(html_table)
273                 html_file.write(u"\n\t<p><br><br></p>\n")
274         except KeyError:
275             logging.warning(u"The output file is not defined.")
276             return
277     logging.info(u"  Done.")
278
279
280 def table_merged_details(table, input_data):
281     """Generate the table(s) with algorithm: table_merged_details
282     specified in the specification file.
283
284     :param table: Table to generate.
285     :param input_data: Data to process.
286     :type table: pandas.Series
287     :type input_data: InputData
288     """
289
290     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
291
292     # Transform the data
293     logging.info(
294         f"    Creating the data set for the {table.get(u'type', u'')} "
295         f"{table.get(u'title', u'')}."
296     )
297     data = input_data.filter_data(table, continue_on_error=True)
298     data = input_data.merge_data(data)
299
300     sort_tests = table.get(u"sort", None)
301     if sort_tests:
302         args = dict(
303             inplace=True,
304             ascending=(sort_tests == u"ascending")
305         )
306         data.sort_index(**args)
307
308     suites = input_data.filter_data(
309         table, continue_on_error=True, data_set=u"suites")
310     suites = input_data.merge_data(suites)
311
312     # Prepare the header of the tables
313     header = list()
314     for column in table[u"columns"]:
315         header.append(
316             u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
317         )
318
319     for suite in suites.values:
320         # Generate data
321         suite_name = suite[u"name"]
322         table_lst = list()
323         for test in data.keys():
324             if data[test][u"parent"] not in suite_name:
325                 continue
326             row_lst = list()
327             for column in table[u"columns"]:
328                 try:
329                     col_data = str(data[test][column[
330                         u"data"].split(u" ")[1]]).replace(u'"', u'""')
331                     # Do not include tests with "Test Failed" in test message
332                     if u"Test Failed" in col_data:
333                         continue
334                     col_data = col_data.replace(
335                         u"No Data", u"Not Captured     "
336                     )
337                     if column[u"data"].split(u" ")[1] in (u"name", ):
338                         if len(col_data) > 30:
339                             col_data_lst = col_data.split(u"-")
340                             half = int(len(col_data_lst) / 2)
341                             col_data = f"{u'-'.join(col_data_lst[:half])}" \
342                                        f"- |br| " \
343                                        f"{u'-'.join(col_data_lst[half:])}"
344                         col_data = f" |prein| {col_data} |preout| "
345                     elif column[u"data"].split(u" ")[1] in (u"msg", ):
346                         # Temporary solution: remove NDR results from message:
347                         if bool(table.get(u'remove-ndr', False)):
348                             try:
349                                 col_data = col_data.split(u" |br| ", 1)[1]
350                             except IndexError:
351                                 pass
352                         col_data = f" |prein| {col_data} |preout| "
353                     elif column[u"data"].split(u" ")[1] in \
354                             (u"conf-history", u"show-run"):
355                         col_data = col_data.replace(u" |br| ", u"", 1)
356                         col_data = f" |prein| {col_data[:-5]} |preout| "
357                     row_lst.append(f'"{col_data}"')
358                 except KeyError:
359                     row_lst.append(u'"Not captured"')
360             if len(row_lst) == len(table[u"columns"]):
361                 table_lst.append(row_lst)
362
363         # Write the data to file
364         if table_lst:
365             separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
366             file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
367             logging.info(f"      Writing file: {file_name}")
368             with open(file_name, u"wt") as file_handler:
369                 file_handler.write(u",".join(header) + u"\n")
370                 for item in table_lst:
371                     file_handler.write(u",".join(item) + u"\n")
372
373     logging.info(u"  Done.")
374
375
376 def _tpc_modify_test_name(test_name):
377     """Modify a test name by replacing its parts.
378
379     :param test_name: Test name to be modified.
380     :type test_name: str
381     :returns: Modified test name.
382     :rtype: str
383     """
384     test_name_mod = test_name.\
385         replace(u"-ndrpdrdisc", u""). \
386         replace(u"-ndrpdr", u"").\
387         replace(u"-pdrdisc", u""). \
388         replace(u"-ndrdisc", u"").\
389         replace(u"-pdr", u""). \
390         replace(u"-ndr", u""). \
391         replace(u"1t1c", u"1c").\
392         replace(u"2t1c", u"1c"). \
393         replace(u"2t2c", u"2c").\
394         replace(u"4t2c", u"2c"). \
395         replace(u"4t4c", u"4c").\
396         replace(u"8t4c", u"4c")
397
398     return re.sub(REGEX_NIC, u"", test_name_mod)
399
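# Illustrative example (hypothetical test name, assuming the usual CSIT
# naming scheme):
#   _tpc_modify_test_name(u"2n1l-25ge2p1xxv710-ethip4-ip4base-ndrpdr-2t1c")
# drops the u"-ndrpdr" suffix, shortens u"2t1c" to u"1c" and strips the NIC
# token matched by REGEX_NIC, returning u"2n1l--ethip4-ip4base-1c".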
400
401 def _tpc_modify_displayed_test_name(test_name):
402     """Modify a test name displayed in a table by replacing its parts.
403
404     :param test_name: Test name to be modified.
405     :type test_name: str
406     :returns: Modified test name.
407     :rtype: str
408     """
409     return test_name.\
410         replace(u"1t1c", u"1c").\
411         replace(u"2t1c", u"1c"). \
412         replace(u"2t2c", u"2c").\
413         replace(u"4t2c", u"2c"). \
414         replace(u"4t4c", u"4c").\
415         replace(u"8t4c", u"4c")
416
417
418 def _tpc_insert_data(target, src, include_tests):
419     """Insert src data into the target structure.
420
421     :param target: Target structure where the data is placed.
422     :param src: Source data to be placed into the target structure.
423     :param include_tests: Which results will be included (MRR, NDR, PDR).
424     :type target: list
425     :type src: dict
426     :type include_tests: str
427     """
428     try:
429         if include_tests == u"MRR":
430             target.append(
431                 (
432                     src[u"result"][u"receive-rate"],
433                     src[u"result"][u"receive-stdev"]
434                 )
435             )
436         elif include_tests == u"PDR":
437             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
438         elif include_tests == u"NDR":
439             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
440     except (KeyError, TypeError):
441         pass
442
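# Note: for u"MRR" a (receive-rate, receive-stdev) tuple is appended, while
# for u"PDR"/u"NDR" the single LOWER throughput bound is appended; tests with
# missing or malformed results are silently skipped.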
443
444 def _tpc_sort_table(table):
445     """Sort the table as follows:
446
447     1. Put "New in CSIT-XXXX" rows first (currently left out, see below).
448     2. Put "See footnote" rows second.
449     3. Sort the remaining rows by "Delta".
450
451     :param table: Table to sort.
452     :type table: list
453     :returns: Sorted table.
454     :rtype: list
455     """
456
457     tbl_new = list()
458     tbl_see = list()
459     tbl_delta = list()
460     for item in table:
461         if isinstance(item[-1], str):
462             if u"New in CSIT" in item[-1]:
463                 tbl_new.append(item)
464             elif u"See footnote" in item[-1]:
465                 tbl_see.append(item)
466         else:
467             tbl_delta.append(item)
468
469     # Sort the tables:
470     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
471     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
472     tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
473     tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
474     tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
475
476     # Put the tables together:
477     table = list()
478     # We do not want "New in CSIT":
479     # table.extend(tbl_new)
480     table.extend(tbl_see)
481     table.extend(tbl_delta)
482
483     return table
484
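# Ordering note: rows flagged u"See footnote" come first, the remaining rows
# follow sorted by descending delta (second-to-last column), and rows flagged
# u"New in CSIT" are collected but intentionally left out (see above).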
485
486 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
487                              footnote=u""):
488     """Generate an HTML table from the input data with simple sorting.
489
490     :param header: Table header.
491     :param data: Input data to be included in the table. It is a list of lists.
492         Inner lists are rows in the table. All inner lists must be of the same
493         length. The length of these lists must be the same as the length of the
494         header.
495     :param out_file_name: The name (relative or full path) where the
496         generated html table is written.
497     :param legend: The legend to display below the table.
498     :param footnote: The footnote to display below the table (and legend).
499     :type header: list
500     :type data: list of lists
501     :type out_file_name: str
502     :type legend: str
503     :type footnote: str
504     """
505
506     try:
507         idx = header.index(u"Test Case")
508     except ValueError:
509         idx = 0
510     params = {
511         u"align-hdr": ([u"left", u"center"], [u"left", u"left", u"center"]),
512         u"align-itm": ([u"left", u"right"], [u"left", u"left", u"right"]),
513         u"width": ([28, 9], [4, 24, 10])
514     }
515
516     df_data = pd.DataFrame(data, columns=header)
517
518     df_sorted = [df_data.sort_values(
519         by=[key, header[idx]], ascending=[True, True]
520         if key != header[idx] else [False, True]) for key in header]
521     df_sorted_rev = [df_data.sort_values(
522         by=[key, header[idx]], ascending=[False, True]
523         if key != header[idx] else [True, True]) for key in header]
524     df_sorted.extend(df_sorted_rev)
525
526     fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
527                    for idx in range(len(df_data))]]
528     table_header = dict(
529         values=[f"<b>{item}</b>" for item in header],
530         fill_color=u"#7eade7",
531         align=params[u"align-hdr"][idx]
532     )
533
534     fig = go.Figure()
535
536     for table in df_sorted:
537         columns = [table.get(col) for col in header]
538         fig.add_trace(
539             go.Table(
540                 columnwidth=params[u"width"][idx],
541                 header=table_header,
542                 cells=dict(
543                     values=columns,
544                     fill_color=fill_color,
545                     align=params[u"align-itm"][idx]
546                 )
547             )
548         )
549
550     buttons = list()
551     menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
552     menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
553     menu_items.extend(menu_items_rev)
554     for idx, hdr in enumerate(menu_items):
555         visible = [False, ] * len(menu_items)
556         visible[idx] = True
557         buttons.append(
558             dict(
559                 label=hdr.replace(u" [Mpps]", u""),
560                 method=u"update",
561                 args=[{u"visible": visible}],
562             )
563         )
564
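    # Each dropdown button toggles exactly one of the pre-sorted table traces
    # to visible, emulating interactive column sorting in the static HTML.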
565     fig.update_layout(
566         updatemenus=[
567             go.layout.Updatemenu(
568                 type=u"dropdown",
569                 direction=u"down",
570                 x=0.0,
571                 xanchor=u"left",
572                 y=1.045,
573                 yanchor=u"top",
574                 active=len(menu_items) - 1,
575                 buttons=list(buttons)
576             )
577         ]
578     )
579
580     ploff.plot(
581         fig,
582         show_link=False,
583         auto_open=False,
584         filename=f"{out_file_name}_in.html"
585     )
586
587     file_name = out_file_name.split(u"/")[-1]
588     if u"vpp" in out_file_name:
589         path = u"_tmp/src/vpp_performance_tests/comparisons/"
590     else:
591         path = u"_tmp/src/dpdk_performance_tests/comparisons/"
592     with open(f"{path}{file_name}.rst", u"wt") as rst_file:
593         rst_file.write(
594             u"\n"
595             u".. |br| raw:: html\n\n    <br />\n\n\n"
596             u".. |prein| raw:: html\n\n    <pre>\n\n\n"
597             u".. |preout| raw:: html\n\n    </pre>\n\n"
598         )
599         rst_file.write(
600             u".. raw:: html\n\n"
601             f'    <iframe frameborder="0" scrolling="no" '
602             f'width="1600" height="1000" '
603             f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
604             f'</iframe>\n\n'
605         )
606         if legend:
607             rst_file.write(legend[1:].replace(u"\n", u" |br| "))
608         if footnote:
609             rst_file.write(footnote.replace(u"\n", u" |br| ")[1:])
610
611
612 def table_perf_comparison(table, input_data):
613     """Generate the table(s) with algorithm: table_perf_comparison
614     specified in the specification file.
615
616     :param table: Table to generate.
617     :param input_data: Data to process.
618     :type table: pandas.Series
619     :type input_data: InputData
620     """
621
622     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
623
624     # Transform the data
625     logging.info(
626         f"    Creating the data set for the {table.get(u'type', u'')} "
627         f"{table.get(u'title', u'')}."
628     )
629     data = input_data.filter_data(table, continue_on_error=True)
630
631     # Prepare the header of the tables
632     try:
633         header = [u"Test Case", ]
634         legend = u"\nLegend:\n"
635
636         rca_data = None
637         rca = table.get(u"rca", None)
638         if rca:
639             try:
640                 with open(rca.get(u"data-file", ""), u"r") as rca_file:
641                     rca_data = load(rca_file, Loader=FullLoader)
642                 header.insert(0, rca.get(u"title", "RCA"))
643                 legend += (
644                     u"RCA: Reference to the Root Cause Analysis, see below.\n"
645                 )
646             except (YAMLError, IOError) as err:
647                 logging.warning(repr(err))
648
649         history = table.get(u"history", list())
650         for item in history:
651             header.extend(
652                 [
653                     f"{item[u'title']} Avg({table[u'include-tests']})",
654                     f"{item[u'title']} Stdev({table[u'include-tests']})"
655                 ]
656             )
657             legend += (
658                 f"{item[u'title']} Avg({table[u'include-tests']}): "
659                 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
660                 f"a series of runs of the listed tests executed against "
661                 f"{item[u'title']}.\n"
662                 f"{item[u'title']} Stdev({table[u'include-tests']}): "
663                 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
664                 f"computed from a series of runs of the listed tests executed "
665                 f"against {item[u'title']}.\n"
666             )
667         header.extend(
668             [
669                 f"{table[u'reference'][u'title']} "
670                 f"Avg({table[u'include-tests']})",
671                 f"{table[u'reference'][u'title']} "
672                 f"Stdev({table[u'include-tests']})",
673                 f"{table[u'compare'][u'title']} "
674                 f"Avg({table[u'include-tests']})",
675                 f"{table[u'compare'][u'title']} "
676                 f"Stdev({table[u'include-tests']})",
677                 f"Diff({table[u'reference'][u'title']},"
678                 f"{table[u'compare'][u'title']})",
679                 u"Stdev(Diff)"
680             ]
681         )
682         header_str = u";".join(header) + u"\n"
683         legend += (
684             f"{table[u'reference'][u'title']} "
685             f"Avg({table[u'include-tests']}): "
686             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
687             f"series of runs of the listed tests executed against "
688             f"{table[u'reference'][u'title']}.\n"
689             f"{table[u'reference'][u'title']} "
690             f"Stdev({table[u'include-tests']}): "
691             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
692             f"computed from a series of runs of the listed tests executed "
693             f"against {table[u'reference'][u'title']}.\n"
694             f"{table[u'compare'][u'title']} "
695             f"Avg({table[u'include-tests']}): "
696             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
697             f"series of runs of the listed tests executed against "
698             f"{table[u'compare'][u'title']}.\n"
699             f"{table[u'compare'][u'title']} "
700             f"Stdev({table[u'include-tests']}): "
701             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
702             f"computed from a series of runs of the listed tests executed "
703             f"against {table[u'compare'][u'title']}.\n"
704             f"Diff({table[u'reference'][u'title']},"
705             f"{table[u'compare'][u'title']}): "
706             f"Percentage change calculated for mean values.\n"
707             u"Stdev(Diff): "
708             u"Standard deviation of percentage change calculated for mean "
709             u"values.\n"
710             u"NT: Not Tested\n"
711         )
712     except (AttributeError, KeyError) as err:
713         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
714         return
715
716     # Prepare data for the table:
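    # tbl_dict maps a modified test name to a dict with keys u"name",
    # u"ref-data" and u"cmp-data", plus u"history" (an OrderedDict of
    # title -> list of results) when historical data is configured.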
717     tbl_dict = dict()
718     for job, builds in table[u"reference"][u"data"].items():
719         for build in builds:
720             for tst_name, tst_data in data[job][str(build)].items():
721                 tst_name_mod = _tpc_modify_test_name(tst_name)
722                 if (u"across topologies" in table[u"title"].lower() or
723                         (u" 3n-" in table[u"title"].lower() and
724                          u" 2n-" in table[u"title"].lower())):
725                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
726                 if tbl_dict.get(tst_name_mod, None) is None:
727                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
728                     nic = groups.group(0) if groups else u""
729                     name = \
730                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
731                     if u"across testbeds" in table[u"title"].lower() or \
732                             u"across topologies" in table[u"title"].lower():
733                         name = _tpc_modify_displayed_test_name(name)
734                     tbl_dict[tst_name_mod] = {
735                         u"name": name,
736                         u"ref-data": list(),
737                         u"cmp-data": list()
738                     }
739                 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
740                                  src=tst_data,
741                                  include_tests=table[u"include-tests"])
742
743     replacement = table[u"reference"].get(u"data-replacement", None)
744     if replacement:
745         create_new_list = True
746         rpl_data = input_data.filter_data(
747             table, data=replacement, continue_on_error=True)
748         for job, builds in replacement.items():
749             for build in builds:
750                 for tst_name, tst_data in rpl_data[job][str(build)].items():
751                     tst_name_mod = _tpc_modify_test_name(tst_name)
752                     if (u"across topologies" in table[u"title"].lower() or
753                             (u" 3n-" in table[u"title"].lower() and
754                              u" 2n-" in table[u"title"].lower())):
755                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
756                     if tbl_dict.get(tst_name_mod, None) is None:
757                         name = \
758                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
759                         if u"across testbeds" in table[u"title"].lower() or \
760                                 u"across topologies" in table[u"title"].lower():
761                             name = _tpc_modify_displayed_test_name(name)
762                         tbl_dict[tst_name_mod] = {
763                             u"name": name,
764                             u"ref-data": list(),
765                             u"cmp-data": list()
766                         }
767                     if create_new_list:
768                         create_new_list = False
769                         tbl_dict[tst_name_mod][u"ref-data"] = list()
770
771                     _tpc_insert_data(
772                         target=tbl_dict[tst_name_mod][u"ref-data"],
773                         src=tst_data,
774                         include_tests=table[u"include-tests"]
775                     )
776
777     for job, builds in table[u"compare"][u"data"].items():
778         for build in builds:
779             for tst_name, tst_data in data[job][str(build)].items():
780                 tst_name_mod = _tpc_modify_test_name(tst_name)
781                 if (u"across topologies" in table[u"title"].lower() or
782                         (u" 3n-" in table[u"title"].lower() and
783                          u" 2n-" in table[u"title"].lower())):
784                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
785                 if tbl_dict.get(tst_name_mod, None) is None:
786                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
787                     nic = groups.group(0) if groups else u""
788                     name = \
789                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
790                     if u"across testbeds" in table[u"title"].lower() or \
791                             u"across topologies" in table[u"title"].lower():
792                         name = _tpc_modify_displayed_test_name(name)
793                     tbl_dict[tst_name_mod] = {
794                         u"name": name,
795                         u"ref-data": list(),
796                         u"cmp-data": list()
797                     }
798                 _tpc_insert_data(
799                     target=tbl_dict[tst_name_mod][u"cmp-data"],
800                     src=tst_data,
801                     include_tests=table[u"include-tests"]
802                 )
803
804     replacement = table[u"compare"].get(u"data-replacement", None)
805     if replacement:
806         create_new_list = True
807         rpl_data = input_data.filter_data(
808             table, data=replacement, continue_on_error=True)
809         for job, builds in replacement.items():
810             for build in builds:
811                 for tst_name, tst_data in rpl_data[job][str(build)].items():
812                     tst_name_mod = _tpc_modify_test_name(tst_name)
813                     if (u"across topologies" in table[u"title"].lower() or
814                             (u" 3n-" in table[u"title"].lower() and
815                              u" 2n-" in table[u"title"].lower())):
816                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
817                     if tbl_dict.get(tst_name_mod, None) is None:
818                         name = \
819                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
820                         if u"across testbeds" in table[u"title"].lower() or \
821                                 u"across topologies" in table[u"title"].lower():
822                             name = _tpc_modify_displayed_test_name(name)
823                         tbl_dict[tst_name_mod] = {
824                             u"name": name,
825                             u"ref-data": list(),
826                             u"cmp-data": list()
827                         }
828                     if create_new_list:
829                         create_new_list = False
830                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
831
832                     _tpc_insert_data(
833                         target=tbl_dict[tst_name_mod][u"cmp-data"],
834                         src=tst_data,
835                         include_tests=table[u"include-tests"]
836                     )
837
838     for item in history:
839         for job, builds in item[u"data"].items():
840             for build in builds:
841                 for tst_name, tst_data in data[job][str(build)].items():
842                     tst_name_mod = _tpc_modify_test_name(tst_name)
843                     if (u"across topologies" in table[u"title"].lower() or
844                             (u" 3n-" in table[u"title"].lower() and
845                              u" 2n-" in table[u"title"].lower())):
846                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
847                     if tbl_dict.get(tst_name_mod, None) is None:
848                         continue
849                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
850                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
851                     if tbl_dict[tst_name_mod][u"history"].\
852                             get(item[u"title"], None) is None:
853                         tbl_dict[tst_name_mod][u"history"][item[
854                             u"title"]] = list()
855                     try:
856                         if table[u"include-tests"] == u"MRR":
857                             res = (tst_data[u"result"][u"receive-rate"],
858                                    tst_data[u"result"][u"receive-stdev"])
859                         elif table[u"include-tests"] == u"PDR":
860                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
861                         elif table[u"include-tests"] == u"NDR":
862                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
863                         else:
864                             continue
865                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
866                             append(res)
867                     except (TypeError, KeyError):
868                         pass
869
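    # Each row is assembled as [name, <history avg/stdev pairs>, ref avg,
    # ref stdev, cmp avg, cmp stdev, delta, stdev(diff)] with values in Mpps;
    # u"NT" marks results that were not tested and an optional RCA reference
    # is inserted at the front of the row.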
870     tbl_lst = list()
871     for tst_name in tbl_dict:
872         item = [tbl_dict[tst_name][u"name"], ]
873         if history:
874             if tbl_dict[tst_name].get(u"history", None) is not None:
875                 for hist_data in tbl_dict[tst_name][u"history"].values():
876                     if hist_data:
877                         if table[u"include-tests"] == u"MRR":
878                             item.append(round(hist_data[0][0] / 1e6, 1))
879                             item.append(round(hist_data[0][1] / 1e6, 1))
880                         else:
881                             item.append(round(mean(hist_data) / 1e6, 1))
882                             item.append(round(stdev(hist_data) / 1e6, 1))
883                     else:
884                         item.extend([u"NT", u"NT"])
885             else:
886                 item.extend([u"NT", u"NT"])
887         data_r = tbl_dict[tst_name][u"ref-data"]
888         if data_r:
889             if table[u"include-tests"] == u"MRR":
890                 data_r_mean = data_r[0][0]
891                 data_r_stdev = data_r[0][1]
892             else:
893                 data_r_mean = mean(data_r)
894                 data_r_stdev = stdev(data_r)
895             item.append(round(data_r_mean / 1e6, 1))
896             item.append(round(data_r_stdev / 1e6, 1))
897         else:
898             data_r_mean = None
899             data_r_stdev = None
900             item.extend([u"NT", u"NT"])
901         data_c = tbl_dict[tst_name][u"cmp-data"]
902         if data_c:
903             if table[u"include-tests"] == u"MRR":
904                 data_c_mean = data_c[0][0]
905                 data_c_stdev = data_c[0][1]
906             else:
907                 data_c_mean = mean(data_c)
908                 data_c_stdev = stdev(data_c)
909             item.append(round(data_c_mean / 1e6, 1))
910             item.append(round(data_c_stdev / 1e6, 1))
911         else:
912             data_c_mean = None
913             data_c_stdev = None
914             item.extend([u"NT", u"NT"])
915         if item[-2] == u"NT":
916             pass
917         elif item[-4] == u"NT":
918             item.append(u"New in CSIT-2001")
919             item.append(u"New in CSIT-2001")
920         elif data_r_mean is not None and data_c_mean is not None:
921             delta, d_stdev = relative_change_stdev(
922                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
923             )
924             try:
925                 item.append(round(delta))
926             except ValueError:
927                 item.append(delta)
928             try:
929                 item.append(round(d_stdev))
930             except ValueError:
931                 item.append(d_stdev)
932         if rca_data:
933             rca_nr = rca_data.get(item[0], u"-")
934             item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
935         if (len(item) == len(header)) and (item[-4] != u"NT"):
936             tbl_lst.append(item)
937
938     tbl_lst = _tpc_sort_table(tbl_lst)
939
940     # Generate csv tables:
941     csv_file = f"{table[u'output-file']}.csv"
942     with open(csv_file, u"wt") as file_handler:
943         file_handler.write(header_str)
944         for test in tbl_lst:
945             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
946
947     txt_file_name = f"{table[u'output-file']}.txt"
948     convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
949
950     footnote = u""
951     with open(txt_file_name, u'a') as txt_file:
952         txt_file.write(legend)
953         if rca_data:
954             footnote = rca_data.get(u"footnote", u"")
955             if footnote:
956                 txt_file.write(footnote)
957         txt_file.write(u":END")
958
959     # Generate html table:
960     _tpc_generate_html_table(
961         header,
962         tbl_lst,
963         table[u'output-file'],
964         legend=legend,
965         footnote=footnote
966     )
967
968
969 def table_perf_comparison_nic(table, input_data):
970     """Generate the table(s) with algorithm: table_perf_comparison_nic
971     specified in the specification file.
972
973     :param table: Table to generate.
974     :param input_data: Data to process.
975     :type table: pandas.Series
976     :type input_data: InputData
977     """
978
979     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
980
981     # Transform the data
982     logging.info(
983         f"    Creating the data set for the {table.get(u'type', u'')} "
984         f"{table.get(u'title', u'')}."
985     )
986     data = input_data.filter_data(table, continue_on_error=True)
987
988     # Prepare the header of the tables
989     try:
990         header = [u"Test Case", ]
991         legend = u"\nLegend:\n"
992
993         rca_data = None
994         rca = table.get(u"rca", None)
995         if rca:
996             try:
997                 with open(rca.get(u"data-file", ""), u"r") as rca_file:
998                     rca_data = load(rca_file, Loader=FullLoader)
999                 header.insert(0, rca.get(u"title", "RCA"))
1000                 legend += (
1001                     u"RCA: Reference to the Root Cause Analysis, see below.\n"
1002                 )
1003             except (YAMLError, IOError) as err:
1004                 logging.warning(repr(err))
1005
1006         history = table.get(u"history", list())
1007         for item in history:
1008             header.extend(
1009                 [
1010                     f"{item[u'title']} Avg({table[u'include-tests']})",
1011                     f"{item[u'title']} Stdev({table[u'include-tests']})"
1012                 ]
1013             )
1014             legend += (
1015                 f"{item[u'title']} Avg({table[u'include-tests']}): "
1016                 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
1017                 f"a series of runs of the listed tests executed against "
1018                 f"{item[u'title']}.\n"
1019                 f"{item[u'title']} Stdev({table[u'include-tests']}): "
1020                 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1021                 f"computed from a series of runs of the listed tests executed "
1022                 f"against {item[u'title']}.\n"
1023             )
1024         header.extend(
1025             [
1026                 f"{table[u'reference'][u'title']} "
1027                 f"Avg({table[u'include-tests']})",
1028                 f"{table[u'reference'][u'title']} "
1029                 f"Stdev({table[u'include-tests']})",
1030                 f"{table[u'compare'][u'title']} "
1031                 f"Avg({table[u'include-tests']})",
1032                 f"{table[u'compare'][u'title']} "
1033                 f"Stdev({table[u'include-tests']})",
1034                 f"Diff({table[u'reference'][u'title']},"
1035                 f"{table[u'compare'][u'title']})",
1036                 u"Stdev(Diff)"
1037             ]
1038         )
1039         header_str = u";".join(header) + u"\n"
1040         legend += (
1041             f"{table[u'reference'][u'title']} "
1042             f"Avg({table[u'include-tests']}): "
1043             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1044             f"series of runs of the listed tests executed against "
1045             f"{table[u'reference'][u'title']}.\n"
1046             f"{table[u'reference'][u'title']} "
1047             f"Stdev({table[u'include-tests']}): "
1048             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1049             f"computed from a series of runs of the listed tests executed "
1050             f"against {table[u'reference'][u'title']}.\n"
1051             f"{table[u'compare'][u'title']} "
1052             f"Avg({table[u'include-tests']}): "
1053             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1054             f"series of runs of the listed tests executed against "
1055             f"{table[u'compare'][u'title']}.\n"
1056             f"{table[u'compare'][u'title']} "
1057             f"Stdev({table[u'include-tests']}): "
1058             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1059             f"computed from a series of runs of the listed tests executed "
1060             f"against {table[u'compare'][u'title']}.\n"
1061             f"Diff({table[u'reference'][u'title']},"
1062             f"{table[u'compare'][u'title']}): "
1063             f"Percentage change calculated for mean values.\n"
1064             u"Stdev(Diff): "
1065             u"Standard deviation of percentage change calculated for mean "
1066             u"values.\n"
1067             u"NT: Not Tested\n"
1068         )
1069     except (AttributeError, KeyError) as err:
1070         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1071         return
1072
1073     # Prepare data for the table:
1074     tbl_dict = dict()
1075     for job, builds in table[u"reference"][u"data"].items():
1076         for build in builds:
1077             for tst_name, tst_data in data[job][str(build)].items():
1078                 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1079                     continue
1080                 tst_name_mod = _tpc_modify_test_name(tst_name)
1081                 if (u"across topologies" in table[u"title"].lower() or
1082                         (u" 3n-" in table[u"title"].lower() and
1083                          u" 2n-" in table[u"title"].lower())):
1084                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1085                 if tbl_dict.get(tst_name_mod, None) is None:
1086                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1087                     if u"across testbeds" in table[u"title"].lower() or \
1088                             u"across topologies" in table[u"title"].lower():
1089                         name = _tpc_modify_displayed_test_name(name)
1090                     tbl_dict[tst_name_mod] = {
1091                         u"name": name,
1092                         u"ref-data": list(),
1093                         u"cmp-data": list()
1094                     }
1095                 _tpc_insert_data(
1096                     target=tbl_dict[tst_name_mod][u"ref-data"],
1097                     src=tst_data,
1098                     include_tests=table[u"include-tests"]
1099                 )
1100
1101     replacement = table[u"reference"].get(u"data-replacement", None)
1102     if replacement:
1103         create_new_list = True
1104         rpl_data = input_data.filter_data(
1105             table, data=replacement, continue_on_error=True)
1106         for job, builds in replacement.items():
1107             for build in builds:
1108                 for tst_name, tst_data in rpl_data[job][str(build)].items():
1109                     if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1110                         continue
1111                     tst_name_mod = _tpc_modify_test_name(tst_name)
1112                     if (u"across topologies" in table[u"title"].lower() or
1113                             (u" 3n-" in table[u"title"].lower() and
1114                              u" 2n-" in table[u"title"].lower())):
1115                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1116                     if tbl_dict.get(tst_name_mod, None) is None:
1117                         name = \
1118                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1119                         if u"across testbeds" in table[u"title"].lower() or \
1120                                 u"across topologies" in table[u"title"].lower():
1121                             name = _tpc_modify_displayed_test_name(name)
1122                         tbl_dict[tst_name_mod] = {
1123                             u"name": name,
1124                             u"ref-data": list(),
1125                             u"cmp-data": list()
1126                         }
1127                     if create_new_list:
1128                         create_new_list = False
1129                         tbl_dict[tst_name_mod][u"ref-data"] = list()
1130
1131                     _tpc_insert_data(
1132                         target=tbl_dict[tst_name_mod][u"ref-data"],
1133                         src=tst_data,
1134                         include_tests=table[u"include-tests"]
1135                     )
1136
1137     for job, builds in table[u"compare"][u"data"].items():
1138         for build in builds:
1139             for tst_name, tst_data in data[job][str(build)].items():
1140                 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1141                     continue
1142                 tst_name_mod = _tpc_modify_test_name(tst_name)
1143                 if (u"across topologies" in table[u"title"].lower() or
1144                         (u" 3n-" in table[u"title"].lower() and
1145                          u" 2n-" in table[u"title"].lower())):
1146                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1147                 if tbl_dict.get(tst_name_mod, None) is None:
1148                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1149                     if u"across testbeds" in table[u"title"].lower() or \
1150                             u"across topologies" in table[u"title"].lower():
1151                         name = _tpc_modify_displayed_test_name(name)
1152                     tbl_dict[tst_name_mod] = {
1153                         u"name": name,
1154                         u"ref-data": list(),
1155                         u"cmp-data": list()
1156                     }
1157                 _tpc_insert_data(
1158                     target=tbl_dict[tst_name_mod][u"cmp-data"],
1159                     src=tst_data,
1160                     include_tests=table[u"include-tests"]
1161                 )
1162
1163     replacement = table[u"compare"].get(u"data-replacement", None)
1164     if replacement:
1165         create_new_list = True
1166         rpl_data = input_data.filter_data(
1167             table, data=replacement, continue_on_error=True)
1168         for job, builds in replacement.items():
1169             for build in builds:
1170                 for tst_name, tst_data in rpl_data[job][str(build)].items():
1171                     if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1172                         continue
1173                     tst_name_mod = _tpc_modify_test_name(tst_name)
1174                     if (u"across topologies" in table[u"title"].lower() or
1175                             (u" 3n-" in table[u"title"].lower() and
1176                              u" 2n-" in table[u"title"].lower())):
1177                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1178                     if tbl_dict.get(tst_name_mod, None) is None:
1179                         name = \
1180                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1181                         if u"across testbeds" in table[u"title"].lower() or \
1182                                 u"across topologies" in table[u"title"].lower():
1183                             name = _tpc_modify_displayed_test_name(name)
1184                         tbl_dict[tst_name_mod] = {
1185                             u"name": name,
1186                             u"ref-data": list(),
1187                             u"cmp-data": list()
1188                         }
1189                     if create_new_list:
1190                         create_new_list = False
1191                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
1192
1193                     _tpc_insert_data(
1194                         target=tbl_dict[tst_name_mod][u"cmp-data"],
1195                         src=tst_data,
1196                         include_tests=table[u"include-tests"]
1197                     )
1198
1199     for item in history:
1200         for job, builds in item[u"data"].items():
1201             for build in builds:
1202                 for tst_name, tst_data in data[job][str(build)].items():
1203                     if item[u"nic"] not in tst_data[u"tags"]:
1204                         continue
1205                     tst_name_mod = _tpc_modify_test_name(tst_name)
1206                     if (u"across topologies" in table[u"title"].lower() or
1207                             (u" 3n-" in table[u"title"].lower() and
1208                              u" 2n-" in table[u"title"].lower())):
1209                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1210                     if tbl_dict.get(tst_name_mod, None) is None:
1211                         continue
1212                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
1213                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1214                     if tbl_dict[tst_name_mod][u"history"].\
1215                             get(item[u"title"], None) is None:
1216                         tbl_dict[tst_name_mod][u"history"][item[
1217                             u"title"]] = list()
1218                     try:
1219                         if table[u"include-tests"] == u"MRR":
1220                             res = (tst_data[u"result"][u"receive-rate"],
1221                                    tst_data[u"result"][u"receive-stdev"])
1222                         elif table[u"include-tests"] == u"PDR":
1223                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1224                         elif table[u"include-tests"] == u"NDR":
1225                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1226                         else:
1227                             continue
1228                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1229                             append(res)
1230                     except (TypeError, KeyError):
1231                         pass
1232
1233     tbl_lst = list()
1234     for tst_name in tbl_dict:
1235         item = [tbl_dict[tst_name][u"name"], ]
1236         if history:
1237             if tbl_dict[tst_name].get(u"history", None) is not None:
1238                 for hist_data in tbl_dict[tst_name][u"history"].values():
1239                     if hist_data:
1240                         if table[u"include-tests"] == u"MRR":
1241                             item.append(round(hist_data[0][0] / 1e6, 1))
1242                             item.append(round(hist_data[0][1] / 1e6, 1))
1243                         else:
1244                             item.append(round(mean(hist_data) / 1e6, 1))
1245                             item.append(round(stdev(hist_data) / 1e6, 1))
1246                     else:
1247                         item.extend([u"NT", u"NT"])
1248             else:
1249                 item.extend([u"NT", u"NT"])
1250         data_r = tbl_dict[tst_name][u"ref-data"]
1251         if data_r:
1252             if table[u"include-tests"] == u"MRR":
1253                 data_r_mean = data_r[0][0]
1254                 data_r_stdev = data_r[0][1]
1255             else:
1256                 data_r_mean = mean(data_r)
1257                 data_r_stdev = stdev(data_r)
1258             item.append(round(data_r_mean / 1e6, 1))
1259             item.append(round(data_r_stdev / 1e6, 1))
1260         else:
1261             data_r_mean = None
1262             data_r_stdev = None
1263             item.extend([u"NT", u"NT"])
1264         data_c = tbl_dict[tst_name][u"cmp-data"]
1265         if data_c:
1266             if table[u"include-tests"] == u"MRR":
1267                 data_c_mean = data_c[0][0]
1268                 data_c_stdev = data_c[0][1]
1269             else:
1270                 data_c_mean = mean(data_c)
1271                 data_c_stdev = stdev(data_c)
1272             item.append(round(data_c_mean / 1e6, 1))
1273             item.append(round(data_c_stdev / 1e6, 1))
1274         else:
1275             data_c_mean = None
1276             data_c_stdev = None
1277             item.extend([u"NT", u"NT"])
1278         if item[-2] == u"NT":
1279             pass
1280         elif item[-4] == u"NT":
1281             item.append(u"New in CSIT-2001")
1282             item.append(u"New in CSIT-2001")
1283         elif data_r_mean is not None and data_c_mean is not None:
1284             delta, d_stdev = relative_change_stdev(
1285                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1286             )
1287             try:
1288                 item.append(round(delta))
1289             except ValueError:
1290                 item.append(delta)
1291             try:
1292                 item.append(round(d_stdev))
1293             except ValueError:
1294                 item.append(d_stdev)
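        # When rca_data (root cause analysis references) is available, a
        # reference such as "[1]" is prepended as the first column; the
        # matching footnote is appended to the .txt output further below.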
1295         if rca_data:
1296             rca_nr = rca_data.get(item[0], u"-")
1297             item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1298         if (len(item) == len(header)) and (item[-4] != u"NT"):
1299             tbl_lst.append(item)
1300
1301     tbl_lst = _tpc_sort_table(tbl_lst)
1302
1303     # Generate csv tables:
1304     csv_file = f"{table[u'output-file']}.csv"
1305     with open(csv_file, u"wt") as file_handler:
1306         file_handler.write(header_str)
1307         for test in tbl_lst:
1308             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1309
1310     txt_file_name = f"{table[u'output-file']}.txt"
1311     convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
1312
1313     footnote = u""
1314     with open(txt_file_name, u'a') as txt_file:
1315         txt_file.write(legend)
1316         if rca_data:
1317             footnote = rca_data.get(u"footnote", u"")
1318             if footnote:
1319                 txt_file.write(footnote)
1320         txt_file.write(u":END")
1321
1322     # Generate html table:
1323     _tpc_generate_html_table(
1324         header,
1325         tbl_lst,
1326         table[u'output-file'],
1327         legend=legend,
1328         footnote=footnote
1329     )
1330
1331
1332 def table_nics_comparison(table, input_data):
1333     """Generate the table(s) with algorithm: table_nics_comparison
1334     specified in the specification file.
1335
1336     :param table: Table to generate.
1337     :param input_data: Data to process.
1338     :type table: pandas.Series
1339     :type input_data: InputData
1340     """
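    # Each row pairs the results of one test case executed with the reference
    # NIC against the results with the compared NIC (selected via the NIC
    # tags); rates are reported in Mpps together with the relative difference
    # and its standard deviation.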
1341
1342     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1343
1344     # Transform the data
1345     logging.info(
1346         f"    Creating the data set for the {table.get(u'type', u'')} "
1347         f"{table.get(u'title', u'')}."
1348     )
1349     data = input_data.filter_data(table, continue_on_error=True)
1350
1351     # Prepare the header of the tables
1352     try:
1353         header = [
1354             u"Test Case",
1355             f"{table[u'reference'][u'title']} "
1356             f"Avg({table[u'include-tests']})",
1357             f"{table[u'reference'][u'title']} "
1358             f"Stdev({table[u'include-tests']})",
1359             f"{table[u'compare'][u'title']} "
1360             f"Avg({table[u'include-tests']})",
1361             f"{table[u'compare'][u'title']} "
1362             f"Stdev({table[u'include-tests']})",
1363             f"Diff({table[u'reference'][u'title']},"
1364             f"{table[u'compare'][u'title']})",
1365             u"Stdev(Diff)"
1366         ]
1367         legend = (
1368             u"\nLegend:\n"
1369             f"{table[u'reference'][u'title']} "
1370             f"Avg({table[u'include-tests']}): "
1371             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1372             f"series of runs of the listed tests executed using "
1373             f"{table[u'reference'][u'title']} NIC.\n"
1374             f"{table[u'reference'][u'title']} "
1375             f"Stdev({table[u'include-tests']}): "
1376             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1377             f"computed from a series of runs of the listed tests executed "
1378             f"using {table[u'reference'][u'title']} NIC.\n"
1379             f"{table[u'compare'][u'title']} "
1380             f"Avg({table[u'include-tests']}): "
1381             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1382             f"series of runs of the listed tests executed using "
1383             f"{table[u'compare'][u'title']} NIC.\n"
1384             f"{table[u'compare'][u'title']} "
1385             f"Stdev({table[u'include-tests']}): "
1386             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1387             f"computed from a series of runs of the listed tests executed "
1388             f"using {table[u'compare'][u'title']} NIC.\n"
1389             f"Diff({table[u'reference'][u'title']},"
1390             f"{table[u'compare'][u'title']}): "
1391             f"Percentage change calculated for mean values.\n"
1392             u"Stdev(Diff): "
1393             u"Standard deviation of percentage change calculated for mean "
1394             u"values.\n"
1395             u":END"
1396         )
1397
1398     except (AttributeError, KeyError) as err:
1399         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1400         return
1401
1402     # Prepare data for the table:
1403     tbl_dict = dict()
1404     for job, builds in table[u"data"].items():
1405         for build in builds:
1406             for tst_name, tst_data in data[job][str(build)].items():
1407                 tst_name_mod = _tpc_modify_test_name(tst_name)
1408                 if tbl_dict.get(tst_name_mod, None) is None:
1409                     name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
1410                     tbl_dict[tst_name_mod] = {
1411                         u"name": name,
1412                         u"ref-data": list(),
1413                         u"cmp-data": list()
1414                     }
1415                 try:
1416                     if table[u"include-tests"] == u"MRR":
1417                         result = (tst_data[u"result"][u"receive-rate"],
1418                                   tst_data[u"result"][u"receive-stdev"])
1419                     elif table[u"include-tests"] == u"PDR":
1420                         result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1421                     elif table[u"include-tests"] == u"NDR":
1422                         result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1423                     else:
1424                         continue
1425
1426                     if result and \
1427                             table[u"reference"][u"nic"] in tst_data[u"tags"]:
1428                         tbl_dict[tst_name_mod][u"ref-data"].append(result)
1429                     elif result and \
1430                             table[u"compare"][u"nic"] in tst_data[u"tags"]:
1431                         tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1432                 except (TypeError, KeyError) as err:
1433                     logging.debug(f"No data for {tst_name}\n{repr(err)}")
1434                     # No data in output.xml for this test
1435
1436     tbl_lst = list()
1437     for tst_name in tbl_dict:
1438         item = [tbl_dict[tst_name][u"name"], ]
1439         data_r = tbl_dict[tst_name][u"ref-data"]
1440         if data_r:
1441             if table[u"include-tests"] == u"MRR":
1442                 data_r_mean = data_r[0][0]
1443                 data_r_stdev = data_r[0][1]
1444             else:
1445                 data_r_mean = mean(data_r)
1446                 data_r_stdev = stdev(data_r)
1447             item.append(round(data_r_mean / 1e6, 1))
1448             item.append(round(data_r_stdev / 1e6, 1))
1449         else:
1450             data_r_mean = None
1451             data_r_stdev = None
1452             item.extend([None, None])
1453         data_c = tbl_dict[tst_name][u"cmp-data"]
1454         if data_c:
1455             if table[u"include-tests"] == u"MRR":
1456                 data_c_mean = data_c[0][0]
1457                 data_c_stdev = data_c[0][1]
1458             else:
1459                 data_c_mean = mean(data_c)
1460                 data_c_stdev = stdev(data_c)
1461             item.append(round(data_c_mean / 1e6, 1))
1462             item.append(round(data_c_stdev / 1e6, 1))
1463         else:
1464             data_c_mean = None
1465             data_c_stdev = None
1466             item.extend([None, None])
1467         if data_r_mean is not None and data_c_mean is not None:
1468             delta, d_stdev = relative_change_stdev(
1469                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1470             )
1471             try:
1472                 item.append(round(delta))
1473             except ValueError:
1474                 item.append(delta)
1475             try:
1476                 item.append(round(d_stdev))
1477             except ValueError:
1478                 item.append(d_stdev)
1479             tbl_lst.append(item)
1480
1481     # Sort the table according to the relative change
1482     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1483
1484     # Generate csv tables:
1485     with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1486         file_handler.write(u";".join(header) + u"\n")
1487         for test in tbl_lst:
1488             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1489
1490     convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1491                               f"{table[u'output-file']}.txt",
1492                               delimiter=u";")
1493
1494     with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1495         txt_file.write(legend)
1496
1497     # Generate html table:
1498     _tpc_generate_html_table(
1499         header,
1500         tbl_lst,
1501         table[u'output-file'],
1502         legend=legend
1503     )
1504
1505
1506 def table_soak_vs_ndr(table, input_data):
1507     """Generate the table(s) with algorithm: table_soak_vs_ndr
1508     specified in the specification file.
1509
1510     :param table: Table to generate.
1511     :param input_data: Data to process.
1512     :type table: pandas.Series
1513     :type input_data: InputData
1514     """
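    # The "compare" column is built from SOAK test results
    # (tst_data[u"throughput"][u"LOWER"]), the "reference" column from the
    # matching NDRPDR/MRR results selected by table[u"include-tests"]; the
    # two are paired by stripping the "-soak", "-ndrpdr" and "-mrr" suffixes
    # from the test names.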
1515
1516     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1517
1518     # Transform the data
1519     logging.info(
1520         f"    Creating the data set for the {table.get(u'type', u'')} "
1521         f"{table.get(u'title', u'')}."
1522     )
1523     data = input_data.filter_data(table, continue_on_error=True)
1524
1525     # Prepare the header of the table
1526     try:
1527         header = [
1528             u"Test Case",
1529             f"Avg({table[u'reference'][u'title']})",
1530             f"Stdev({table[u'reference'][u'title']})",
1531             f"Avg({table[u'compare'][u'title']})",
1532             f"Stdev({table[u'compare'][u'title']})",
1533             u"Diff",
1534             u"Stdev(Diff)"
1535         ]
1536         header_str = u";".join(header) + u"\n"
1537         legend = (
1538             u"\nLegend:\n"
1539             f"Avg({table[u'reference'][u'title']}): "
1540             f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
1541             f"from a series of runs of the listed tests.\n"
1542             f"Stdev({table[u'reference'][u'title']}): "
1543             f"Standard deviation value of {table[u'reference'][u'title']} "
1544             f"[Mpps] computed from a series of runs of the listed tests.\n"
1545             f"Avg({table[u'compare'][u'title']}): "
1546             f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
1547             f"a series of runs of the listed tests.\n"
1548             f"Stdev({table[u'compare'][u'title']}): "
1549             f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
1550             f"computed from a series of runs of the listed tests.\n"
1551             f"Diff({table[u'reference'][u'title']},"
1552             f"{table[u'compare'][u'title']}): "
1553             f"Percentage change calculated for mean values.\n"
1554             u"Stdev(Diff): "
1555             u"Standard deviation of percentage change calculated for mean "
1556             u"values.\n"
1557             u":END"
1558         )
1559     except (AttributeError, KeyError) as err:
1560         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1561         return
1562
1563     # Create a list of available SOAK test results:
1564     tbl_dict = dict()
1565     for job, builds in table[u"compare"][u"data"].items():
1566         for build in builds:
1567             for tst_name, tst_data in data[job][str(build)].items():
1568                 if tst_data[u"type"] == u"SOAK":
1569                     tst_name_mod = tst_name.replace(u"-soak", u"")
1570                     if tbl_dict.get(tst_name_mod, None) is None:
1571                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
1572                         nic = groups.group(0) if groups else u""
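                        # REGEX_NIC extracts the NIC token from the parent
                        # suite name, e.g. a string such as "25ge2p1xxv710".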
1573                         name = (
1574                             f"{nic}-"
1575                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1576                         )
1577                         tbl_dict[tst_name_mod] = {
1578                             u"name": name,
1579                             u"ref-data": list(),
1580                             u"cmp-data": list()
1581                         }
1582                     try:
1583                         tbl_dict[tst_name_mod][u"cmp-data"].append(
1584                             tst_data[u"throughput"][u"LOWER"])
1585                     except (KeyError, TypeError):
1586                         pass
1587     tests_lst = tbl_dict.keys()
1588
1589     # Add corresponding NDR test results:
1590     for job, builds in table[u"reference"][u"data"].items():
1591         for build in builds:
1592             for tst_name, tst_data in data[job][str(build)].items():
1593                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1594                     replace(u"-mrr", u"")
1595                 if tst_name_mod not in tests_lst:
1596                     continue
1597                 try:
1598                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1599                         continue
1600                     if table[u"include-tests"] == u"MRR":
1601                         result = (tst_data[u"result"][u"receive-rate"],
1602                                   tst_data[u"result"][u"receive-stdev"])
1603                     elif table[u"include-tests"] == u"PDR":
1604                         result = \
1605                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
1606                     elif table[u"include-tests"] == u"NDR":
1607                         result = \
1608                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
1609                     else:
1610                         result = None
1611                     if result is not None:
1612                         tbl_dict[tst_name_mod][u"ref-data"].append(
1613                             result)
1614                 except (KeyError, TypeError):
1615                     continue
1616
1617     tbl_lst = list()
1618     for tst_name in tbl_dict:
1619         item = [tbl_dict[tst_name][u"name"], ]
1620         data_r = tbl_dict[tst_name][u"ref-data"]
1621         if data_r:
1622             if table[u"include-tests"] == u"MRR":
1623                 data_r_mean = data_r[0][0]
1624                 data_r_stdev = data_r[0][1]
1625             else:
1626                 data_r_mean = mean(data_r)
1627                 data_r_stdev = stdev(data_r)
1628             item.append(round(data_r_mean / 1e6, 1))
1629             item.append(round(data_r_stdev / 1e6, 1))
1630         else:
1631             data_r_mean = None
1632             data_r_stdev = None
1633             item.extend([None, None])
1634         data_c = tbl_dict[tst_name][u"cmp-data"]
1635         if data_c:
1636             if table[u"include-tests"] == u"MRR":
1637                 data_c_mean = data_c[0][0]
1638                 data_c_stdev = data_c[0][1]
1639             else:
1640                 data_c_mean = mean(data_c)
1641                 data_c_stdev = stdev(data_c)
1642             item.append(round(data_c_mean / 1e6, 1))
1643             item.append(round(data_c_stdev / 1e6, 1))
1644         else:
1645             data_c_mean = None
1646             data_c_stdev = None
1647             item.extend([None, None])
1648         if data_r_mean is not None and data_c_mean is not None:
1649             delta, d_stdev = relative_change_stdev(
1650                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1651             try:
1652                 item.append(round(delta))
1653             except ValueError:
1654                 item.append(delta)
1655             try:
1656                 item.append(round(d_stdev))
1657             except ValueError:
1658                 item.append(d_stdev)
1659             tbl_lst.append(item)
1660
1661     # Sort the table according to the relative change
1662     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1663
1664     # Generate csv tables:
1665     csv_file = f"{table[u'output-file']}.csv"
1666     with open(csv_file, u"wt") as file_handler:
1667         file_handler.write(header_str)
1668         for test in tbl_lst:
1669             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1670
1671     convert_csv_to_pretty_txt(
1672         csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
1673     )
1674     with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1675         txt_file.write(legend)
1676
1677     # Generate html table:
1678     _tpc_generate_html_table(
1679         header,
1680         tbl_lst,
1681         table[u'output-file'],
1682         legend=legend
1683     )
1684
1685
1686 def table_perf_trending_dash(table, input_data):
1687     """Generate the table(s) with algorithm:
1688     table_perf_trending_dash
1689     specified in the specification file.
1690
1691     :param table: Table to generate.
1692     :param input_data: Data to process.
1693     :type table: pandas.Series
1694     :type input_data: InputData
1695     """
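    # For every test the dashboard reports the latest trend value (the last
    # average returned by classify_anomalies), the short-term change against
    # the average one short window back, the long-term change against the
    # maximum average inside the long window, and the number of regressions
    # and progressions detected within the short window.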
1696
1697     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1698
1699     # Transform the data
1700     logging.info(
1701         f"    Creating the data set for the {table.get(u'type', u'')} "
1702         f"{table.get(u'title', u'')}."
1703     )
1704     data = input_data.filter_data(table, continue_on_error=True)
1705
1706     # Prepare the header of the tables
1707     header = [
1708         u"Test Case",
1709         u"Trend [Mpps]",
1710         u"Short-Term Change [%]",
1711         u"Long-Term Change [%]",
1712         u"Regressions [#]",
1713         u"Progressions [#]"
1714     ]
1715     header_str = u",".join(header) + u"\n"
1716
1717     # Prepare data for the table:
1718     tbl_dict = dict()
1719     for job, builds in table[u"data"].items():
1720         for build in builds:
1721             for tst_name, tst_data in data[job][str(build)].items():
1722                 if tst_name.lower() in table.get(u"ignore-list", list()):
1723                     continue
1724                 if tbl_dict.get(tst_name, None) is None:
1725                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1726                     if not groups:
1727                         continue
1728                     nic = groups.group(0)
1729                     tbl_dict[tst_name] = {
1730                         u"name": f"{nic}-{tst_data[u'name']}",
1731                         u"data": OrderedDict()
1732                     }
1733                 try:
1734                     tbl_dict[tst_name][u"data"][str(build)] = \
1735                         tst_data[u"result"][u"receive-rate"]
1736                 except (TypeError, KeyError):
1737                     pass  # No data in output.xml for this test
1738
1739     tbl_lst = list()
1740     for tst_name in tbl_dict:
1741         data_t = tbl_dict[tst_name][u"data"]
1742         if len(data_t) < 2:
1743             continue
1744
1745         classification_lst, avgs = classify_anomalies(data_t)
1746
1747         win_size = min(len(data_t), table[u"window"])
1748         long_win_size = min(len(data_t), table[u"long-trend-window"])
1749
1750         try:
1751             max_long_avg = max(
1752                 [x for x in avgs[-long_win_size:-win_size]
1753                  if not isnan(x)])
1754         except ValueError:
1755             max_long_avg = nan
1756         last_avg = avgs[-1]
1757         avg_week_ago = avgs[max(-win_size, -len(avgs))]
1758
1759         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1760             rel_change_last = nan
1761         else:
1762             rel_change_last = round(
1763                 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1764
1765         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1766             rel_change_long = nan
1767         else:
1768             rel_change_long = round(
1769                 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
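        # Both changes are expressed in percent, e.g. last_avg = 9.5e6 and
        # avg_week_ago = 10.0e6 give rel_change_last = -5.0.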
1770
1771         if classification_lst:
1772             if isnan(rel_change_last) and isnan(rel_change_long):
1773                 continue
1774             if isnan(last_avg) or isnan(rel_change_last) or \
1775                     isnan(rel_change_long):
1776                 continue
1777             tbl_lst.append(
1778                 [tbl_dict[tst_name][u"name"],
1779                  round(last_avg / 1e6, 2),
1780                  rel_change_last,
1781                  rel_change_long,
1782                  classification_lst[-win_size:].count(u"regression"),
1783                  classification_lst[-win_size:].count(u"progression")])
1784
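    # Pre-sort by name, then order the dashboard by the number of regressions
    # (descending), the number of progressions (descending) and finally by
    # the short-term change (ascending).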
1785     tbl_lst.sort(key=lambda rel: rel[0])
1786
1787     tbl_sorted = list()
1788     for nrr in range(table[u"window"], -1, -1):
1789         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1790         for nrp in range(table[u"window"], -1, -1):
1791             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1792             tbl_out.sort(key=lambda rel: rel[2])
1793             tbl_sorted.extend(tbl_out)
1794
1795     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1796
1797     logging.info(f"    Writing file: {file_name}")
1798     with open(file_name, u"wt") as file_handler:
1799         file_handler.write(header_str)
1800         for test in tbl_sorted:
1801             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1802
1803     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1804     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1805
1806
1807 def _generate_url(testbed, test_name):
1808     """Generate URL to a trending plot from the name of the test case.
1809
1810     :param testbed: The testbed used for testing.
1811     :param test_name: The name of the test case.
1812     :type testbed: str
1813     :type test_name: str
1814     :returns: The URL to the plot with the trending data for the given test
1815         case.
1816     :rtype: str
1817     """
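    # Illustrative example: for testbed u"2n-clx" and a (hypothetical) test
    # name u"xxv710-64b-2t1c-avf-eth-l2bdbasemaclrn-mrr" the function returns
    # u"l2-2n-clx-xxv710.html#64b-2t1c-base-avf".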
1818
1819     if u"x520" in test_name:
1820         nic = u"x520"
1821     elif u"x710" in test_name:
1822         nic = u"x710"
1823     elif u"xl710" in test_name:
1824         nic = u"xl710"
1825     elif u"xxv710" in test_name:
1826         nic = u"xxv710"
1827     elif u"vic1227" in test_name:
1828         nic = u"vic1227"
1829     elif u"vic1385" in test_name:
1830         nic = u"vic1385"
1831     elif u"x553" in test_name:
1832         nic = u"x553"
1833     elif u"cx556" in test_name or u"cx556a" in test_name:
1834         nic = u"cx556a"
1835     else:
1836         nic = u""
1837
1838     if u"64b" in test_name:
1839         frame_size = u"64b"
1840     elif u"78b" in test_name:
1841         frame_size = u"78b"
1842     elif u"imix" in test_name:
1843         frame_size = u"imix"
1844     elif u"9000b" in test_name:
1845         frame_size = u"9000b"
1846     elif u"1518b" in test_name:
1847         frame_size = u"1518b"
1848     elif u"114b" in test_name:
1849         frame_size = u"114b"
1850     else:
1851         frame_size = u""
1852
1853     if u"1t1c" in test_name or \
1854         (u"-1c-" in test_name and
1855          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1856         cores = u"1t1c"
1857     elif u"2t2c" in test_name or \
1858          (u"-2c-" in test_name and
1859           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1860         cores = u"2t2c"
1861     elif u"4t4c" in test_name or \
1862          (u"-4c-" in test_name and
1863           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1864         cores = u"4t4c"
1865     elif u"2t1c" in test_name or \
1866          (u"-1c-" in test_name and
1867           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1868         cores = u"2t1c"
1869     elif u"4t2c" in test_name or \
1870          (u"-2c-" in test_name and
1871           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1872         cores = u"4t2c"
1873     elif u"8t4c" in test_name or \
1874          (u"-4c-" in test_name and
1875           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1876         cores = u"8t4c"
1877     else:
1878         cores = u""
1879
1880     if u"testpmd" in test_name:
1881         driver = u"testpmd"
1882     elif u"l3fwd" in test_name:
1883         driver = u"l3fwd"
1884     elif u"avf" in test_name:
1885         driver = u"avf"
1886     elif u"rdma" in test_name:
1887         driver = u"rdma"
1888     elif u"dnv" in testbed or u"tsh" in testbed:
1889         driver = u"ixgbe"
1890     else:
1891         driver = u"dpdk"
1892
1893     if u"acl" in test_name or \
1894             u"macip" in test_name or \
1895             u"nat" in test_name or \
1896             u"policer" in test_name or \
1897             u"cop" in test_name:
1898         bsf = u"features"
1899     elif u"scale" in test_name:
1900         bsf = u"scale"
1901     elif u"base" in test_name:
1902         bsf = u"base"
1903     else:
1904         bsf = u"base"
1905
1906     if u"114b" in test_name and u"vhost" in test_name:
1907         domain = u"vts"
1908     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1909         domain = u"dpdk"
1910     elif u"memif" in test_name:
1911         domain = u"container_memif"
1912     elif u"srv6" in test_name:
1913         domain = u"srv6"
1914     elif u"vhost" in test_name:
1915         domain = u"vhost"
1916         if u"vppl2xc" in test_name:
1917             driver += u"-vpp"
1918         else:
1919             driver += u"-testpmd"
1920         if u"lbvpplacp" in test_name:
1921             bsf += u"-link-bonding"
1922     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1923         domain = u"nf_service_density_vnfc"
1924     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1925         domain = u"nf_service_density_cnfc"
1926     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1927         domain = u"nf_service_density_cnfp"
1928     elif u"ipsec" in test_name:
1929         domain = u"ipsec"
1930         if u"sw" in test_name:
1931             bsf += u"-sw"
1932         elif u"hw" in test_name:
1933             bsf += u"-hw"
1934     elif u"ethip4vxlan" in test_name:
1935         domain = u"ip4_tunnels"
1936     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1937         domain = u"ip4"
1938     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1939         domain = u"ip6"
1940     elif u"l2xcbase" in test_name or \
1941             u"l2xcscale" in test_name or \
1942             u"l2bdbasemaclrn" in test_name or \
1943             u"l2bdscale" in test_name or \
1944             u"l2patch" in test_name:
1945         domain = u"l2"
1946     else:
1947         domain = u""
1948
1949     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1950     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1951
1952     return file_name + anchor_name
1953
1954
1955 def table_perf_trending_dash_html(table, input_data):
1956     """Generate the table(s) with algorithm:
1957     table_perf_trending_dash_html specified in the specification
1958     file.
1959
1960     :param table: Table to generate.
1961     :param input_data: Data to process.
1962     :type table: dict
1963     :type input_data: InputData
1964     """
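    # The input CSV is expected to match the layout produced by
    # table_perf_trending_dash, i.e. the columns "Test Case", "Trend [Mpps]",
    # "Short-Term Change [%]", "Long-Term Change [%]", "Regressions [#]" and
    # "Progressions [#]"; the last two drive the row colouring below.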
1965
1966     _ = input_data
1967
1968     if not table.get(u"testbed", None):
1969         logging.error(
1970             f"The testbed is not defined for the table "
1971             f"{table.get(u'title', u'')}."
1972         )
1973         return
1974
1975     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1976
1977     try:
1978         with open(table[u"input-file"], u'rt') as csv_file:
1979             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1980     except KeyError:
1981         logging.warning(u"The input file is not defined.")
1982         return
1983     except csv.Error as err:
1984         logging.warning(
1985             f"Not possible to process the file {table[u'input-file']}.\n"
1986             f"{repr(err)}"
1987         )
1988         return
1989
1990     # Table:
1991     dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1992
1993     # Table header:
1994     trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1995     for idx, item in enumerate(csv_lst[0]):
1996         alignment = u"left" if idx == 0 else u"center"
1997         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
1998         thead.text = item
1999
2000     # Rows:
2001     colors = {
2002         u"regression": (
2003             u"#ffcccc",
2004             u"#ff9999"
2005         ),
2006         u"progression": (
2007             u"#c6ecc6",
2008             u"#9fdf9f"
2009         ),
2010         u"normal": (
2011             u"#e9f1fb",
2012             u"#d4e4f7"
2013         )
2014     }
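    # A row is shaded red if it reports any regressions (column 5), green if
    # it reports progressions only (column 6), and blue otherwise; the two
    # shades of each colour alternate with the row index.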
2015     for r_idx, row in enumerate(csv_lst[1:]):
2016         if int(row[4]):
2017             color = u"regression"
2018         elif int(row[5]):
2019             color = u"progression"
2020         else:
2021             color = u"normal"
2022         trow = ET.SubElement(
2023             dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
2024         )
2025
2026         # Columns:
2027         for c_idx, item in enumerate(row):
2028             tdata = ET.SubElement(
2029                 trow,
2030                 u"td",
2031                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
2032             )
2033             # Name:
2034             if c_idx == 0:
2035                 ref = ET.SubElement(
2036                     tdata,
2037                     u"a",
2038                     attrib=dict(
2039                         href=f"../trending/"
2040                              f"{_generate_url(table.get(u'testbed', ''), item)}"
2041                     )
2042                 )
2043                 ref.text = item
2044             else:
2045                 tdata.text = item
2046     try:
2047         with open(table[u"output-file"], u'w') as html_file:
2048             logging.info(f"    Writing file: {table[u'output-file']}")
2049             html_file.write(u".. raw:: html\n\n\t")
2050             html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
2051             html_file.write(u"\n\t<p><br><br></p>\n")
2052     except KeyError:
2053         logging.warning(u"The output file is not defined.")
2054         return
2055
2056
2057 def table_last_failed_tests(table, input_data):
2058     """Generate the table(s) with algorithm: table_last_failed_tests
2059     specified in the specification file.
2060
2061     :param table: Table to generate.
2062     :param input_data: Data to process.
2063     :type table: pandas.Series
2064     :type input_data: InputData
2065     """
2066
2067     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2068
2069     # Transform the data
2070     logging.info(
2071         f"    Creating the data set for the {table.get(u'type', u'')} "
2072         f"{table.get(u'title', u'')}."
2073     )
2074
2075     data = input_data.filter_data(table, continue_on_error=True)
2076
2077     if data is None or data.empty:
2078         logging.warning(
2079             f"    No data for the {table.get(u'type', u'')} "
2080             f"{table.get(u'title', u'')}."
2081         )
2082         return
2083
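    # The output is a plain text listing; for every build it records, in
    # order: the build number, the tested version, the number of passed
    # tests, the number of failed tests and the names of the failed tests,
    # one item per line.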
2084     tbl_list = list()
2085     for job, builds in table[u"data"].items():
2086         for build in builds:
2087             build = str(build)
2088             try:
2089                 version = input_data.metadata(job, build).get(u"version", u"")
2090             except KeyError:
2091                 logging.error(f"Data for {job}: {build} is not present.")
2092                 return
2093             tbl_list.append(build)
2094             tbl_list.append(version)
2095             failed_tests = list()
2096             passed = 0
2097             failed = 0
2098             for tst_data in data[job][build].values:
2099                 if tst_data[u"status"] != u"FAIL":
2100                     passed += 1
2101                     continue
2102                 failed += 1
2103                 groups = re.search(REGEX_NIC, tst_data[u"parent"])
2104                 if not groups:
2105                     continue
2106                 nic = groups.group(0)
2107                 failed_tests.append(f"{nic}-{tst_data[u'name']}")
2108             tbl_list.append(str(passed))
2109             tbl_list.append(str(failed))
2110             tbl_list.extend(failed_tests)
2111
2112     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2113     logging.info(f"    Writing file: {file_name}")
2114     with open(file_name, u"wt") as file_handler:
2115         for test in tbl_list:
2116             file_handler.write(test + u'\n')
2117
2118
2119 def table_failed_tests(table, input_data):
2120     """Generate the table(s) with algorithm: table_failed_tests
2121     specified in the specification file.
2122
2123     :param table: Table to generate.
2124     :param input_data: Data to process.
2125     :type table: pandas.Series
2126     :type input_data: InputData
2127     """
2128
2129     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2130
2131     # Transform the data
2132     logging.info(
2133         f"    Creating the data set for the {table.get(u'type', u'')} "
2134         f"{table.get(u'title', u'')}."
2135     )
2136     data = input_data.filter_data(table, continue_on_error=True)
2137
2138     # Prepare the header of the tables
2139     header = [
2140         u"Test Case",
2141         u"Failures [#]",
2142         u"Last Failure [Time]",
2143         u"Last Failure [VPP-Build-Id]",
2144         u"Last Failure [CSIT-Job-Build-Id]"
2145     ]
2146
2147     # Generate the data for the table according to the model in the table
2148     # specification
2149
2150     now = dt.utcnow()
2151     timeperiod = timedelta(int(table.get(u"window", 7)))
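    # table[u"window"] is interpreted as a number of days, so the default
    # timedelta(7) keeps only results generated within the last week.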
2152
2153     tbl_dict = dict()
2154     for job, builds in table[u"data"].items():
2155         for build in builds:
2156             build = str(build)
2157             for tst_name, tst_data in data[job][build].items():
2158                 if tst_name.lower() in table.get(u"ignore-list", list()):
2159                     continue
2160                 if tbl_dict.get(tst_name, None) is None:
2161                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
2162                     if not groups:
2163                         continue
2164                     nic = groups.group(0)
2165                     tbl_dict[tst_name] = {
2166                         u"name": f"{nic}-{tst_data[u'name']}",
2167                         u"data": OrderedDict()
2168                     }
2169                 try:
2170                     generated = input_data.metadata(job, build).\
2171                         get(u"generated", u"")
2172                     if not generated:
2173                         continue
2174                     then = dt.strptime(generated, u"%Y%m%d %H:%M")
2175                     if (now - then) <= timeperiod:
2176                         tbl_dict[tst_name][u"data"][build] = (
2177                             tst_data[u"status"],
2178                             generated,
2179                             input_data.metadata(job, build).get(u"version",
2180                                                                 u""),
2181                             build
2182                         )
2183                 except (TypeError, KeyError) as err:
2184                     logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
2185
2186     max_fails = 0
2187     tbl_lst = list()
2188     for tst_data in tbl_dict.values():
2189         fails_nr = 0
2190         fails_last_date = u""
2191         fails_last_vpp = u""
2192         fails_last_csit = u""
2193         for val in tst_data[u"data"].values():
2194             if val[0] == u"FAIL":
2195                 fails_nr += 1
2196                 fails_last_date = val[1]
2197                 fails_last_vpp = val[2]
2198                 fails_last_csit = val[3]
2199         if fails_nr:
2200             max_fails = fails_nr if fails_nr > max_fails else max_fails
2201             tbl_lst.append(
2202                 [
2203                     tst_data[u"name"],
2204                     fails_nr,
2205                     fails_last_date,
2206                     fails_last_vpp,
2207                     f"mrr-daily-build-{fails_last_csit}"
2208                 ]
2209             )
2210
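    # Order the rows by the date of the last failure (newest first), then
    # group them by the number of failures, most failing tests first.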
2211     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
2212     tbl_sorted = list()
2213     for nrf in range(max_fails, -1, -1):
2214         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
2215         tbl_sorted.extend(tbl_fails)
2216
2217     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2218     logging.info(f"    Writing file: {file_name}")
2219     with open(file_name, u"wt") as file_handler:
2220         file_handler.write(u",".join(header) + u"\n")
2221         for test in tbl_sorted:
2222             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
2223
2224     logging.info(f"    Writing file: {table[u'output-file']}.txt")
2225     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
2226
2227
2228 def table_failed_tests_html(table, input_data):
2229     """Generate the table(s) with algorithm: table_failed_tests_html
2230     specified in the specification file.
2231
2232     :param table: Table to generate.
2233     :param input_data: Data to process.
2234     :type table: pandas.Series
2235     :type input_data: InputData
2236     """
2237
2238     _ = input_data
2239
2240     if not table.get(u"testbed", None):
2241         logging.error(
2242             f"The testbed is not defined for the table "
2243             f"{table.get(u'title', u'')}."
2244         )
2245         return
2246
2247     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2248
2249     try:
2250         with open(table[u"input-file"], u'rt') as csv_file:
2251             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2252     except KeyError:
2253         logging.warning(u"The input file is not defined.")
2254         return
2255     except csv.Error as err:
2256         logging.warning(
2257             f"Not possible to process the file {table[u'input-file']}.\n"
2258             f"{repr(err)}"
2259         )
2260         return
2261
2262     # Table:
2263     failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2264
2265     # Table header:
2266     trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2267     for idx, item in enumerate(csv_lst[0]):
2268         alignment = u"left" if idx == 0 else u"center"
2269         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
2270         thead.text = item
2271
2272     # Rows:
2273     colors = (u"#e9f1fb", u"#d4e4f7")
2274     for r_idx, row in enumerate(csv_lst[1:]):
2275         background = colors[r_idx % 2]
2276         trow = ET.SubElement(
2277             failed_tests, u"tr", attrib=dict(bgcolor=background)
2278         )
2279
2280         # Columns:
2281         for c_idx, item in enumerate(row):
2282             tdata = ET.SubElement(
2283                 trow,
2284                 u"td",
2285                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
2286             )
2287             # Name:
2288             if c_idx == 0:
2289                 ref = ET.SubElement(
2290                     tdata,
2291                     u"a",
2292                     attrib=dict(
2293                         href=f"../trending/"
2294                              f"{_generate_url(table.get(u'testbed', ''), item)}"
2295                     )
2296                 )
2297                 ref.text = item
2298             else:
2299                 tdata.text = item
2300     try:
2301         with open(table[u"output-file"], u'w') as html_file:
2302             logging.info(f"    Writing file: {table[u'output-file']}")
2303             html_file.write(u".. raw:: html\n\n\t")
2304             html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2305             html_file.write(u"\n\t<p><br><br></p>\n")
2306     except KeyError:
2307         logging.warning(u"The output file is not defined.")
2308         return