# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Algorithms to generate tables.
"""


import logging
import csv
import re

from collections import OrderedDict
from xml.etree import ElementTree as ET
from datetime import datetime as dt
from datetime import timedelta

import plotly.graph_objects as go
import plotly.offline as ploff
import pandas as pd

from numpy import nan, isnan
from yaml import load, FullLoader, YAMLError

from pal_utils import mean, stdev, classify_anomalies, \
    convert_csv_to_pretty_txt, relative_change_stdev


REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
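# Illustrative note: REGEX_NIC is used below to find or strip the NIC part of
# a suite/test name. Assuming a CSIT-style name (the example string is
# hypothetical), e.g.:
#
#   re.sub(REGEX_NIC, u"", u"10ge2p1x710-ethip4-ip4base")
#   # -> u"-ethip4-ip4base"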


def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            generator[table[u"algorithm"]](table, data)
        except NameError as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")

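# A minimal sketch of how the dispatch above is driven (the exact layout of a
# table specification lives in the specification files; apart from
# u"algorithm" and u"title", the keys shown here are illustrative
# assumptions):
#
#   table = {
#       u"algorithm": u"table_failed_tests",  # selects the generator function
#       u"title": u"Failed tests",
#       u"output-file": u"...",               # further keys are algorithm-specific
#   }
#   generator[table[u"algorithm"]](table, data)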

def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: table_oper_data_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")


def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u" |br| ", 1)[1]
                            except IndexError:
                                pass
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")

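# Note on the CSV cells written by table_merged_details() above: every cell is
# wrapped in double quotes with embedded quotes doubled (standard CSV escaping,
# so a "quoted" value becomes "a ""quoted"" value"), long test names are split
# with a u" |br| " marker, and name/msg/conf-history/show-run content is
# wrapped in u" |prein| ... |preout| " markers that are resolved later in the
# rST rendering.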

def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    test_name_mod = test_name.\
        replace(u"-ndrpdrdisc", u""). \
        replace(u"-ndrpdr", u"").\
        replace(u"-pdrdisc", u""). \
        replace(u"-ndrdisc", u"").\
        replace(u"-pdr", u""). \
        replace(u"-ndr", u""). \
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c"). \
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c"). \
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")

    return re.sub(REGEX_NIC, u"", test_name_mod)

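# Illustrative example (the test name is hypothetical, not taken from real
# data): _tpc_modify_test_name(u"10ge2p1x710-ethip4-ip4base-2t1c-ndrpdr")
# drops the u"-ndrpdr" suffix, maps u"2t1c" to u"1c" and strips the NIC part,
# returning u"-ethip4-ip4base-1c".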

def _tpc_modify_displayed_test_name(test_name):
    """Modify a test name which is displayed in a table by replacing its parts.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    return test_name.\
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c"). \
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c"). \
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")


def _tpc_insert_data(target, src, include_tests):
    """Insert src data into the target structure.

    :param target: Target structure where the data is placed.
    :param src: Source data to be placed into the target structure.
    :param include_tests: Which results will be included (MRR, NDR, PDR).
    :type target: list
    :type src: dict
    :type include_tests: str
    """
    try:
        if include_tests == u"MRR":
            target.append(
                (
                    src[u"result"][u"receive-rate"],
                    src[u"result"][u"receive-stdev"]
                )
            )
        elif include_tests == u"PDR":
            target.append(src[u"throughput"][u"PDR"][u"LOWER"])
        elif include_tests == u"NDR":
            target.append(src[u"throughput"][u"NDR"][u"LOWER"])
    except (KeyError, TypeError):
        pass

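# Minimal usage sketch (hypothetical numbers; the src layout mirrors the keys
# accessed above):
#
#   target = list()
#   _tpc_insert_data(
#       target=target,
#       src={u"throughput": {u"PDR": {u"LOWER": 12345678.0}}},
#       include_tests=u"PDR"
#   )
#   # target == [12345678.0]
#
# For u"MRR" a (receive-rate, receive-stdev) tuple is appended instead; a
# missing key is silently ignored.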

def _tpc_sort_table(table):
    """Sort the table this way:

    1. Put "New in CSIT-XXXX" first.
    2. Put "See footnote" second.
    3. Sort the rest by "Delta".

    :param table: Table to sort.
    :type table: list
    :returns: Sorted table.
    :rtype: list
    """

    tbl_new = list()
    tbl_see = list()
    tbl_delta = list()
    for item in table:
        if isinstance(item[-1], str):
            if u"New in CSIT" in item[-1]:
                tbl_new.append(item)
            elif u"See footnote" in item[-1]:
                tbl_see.append(item)
        else:
            tbl_delta.append(item)

    # Sort the tables:
    tbl_new.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
    tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
    tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)

    # Put the tables together:
    table = list()
    # We do not want "New in CSIT":
    # table.extend(tbl_new)
    table.extend(tbl_see)
    table.extend(tbl_delta)

    return table

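# Note on the resulting order: u"New in CSIT" rows are collected but
# intentionally dropped (see the commented-out extend above), u"See footnote"
# rows come first, followed by the remaining rows sorted primarily by the
# next-to-last column (the delta), ascending for the footnote group and
# descending for the rest, with ties broken by the first column.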

def _tpc_generate_html_table(header, data, output_file_name):
    """Generate an HTML table from input data with simple sorting support.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    try:
        idx = header.index(u"Test case")
    except ValueError:
        idx = 0
    params = {
        u"align-hdr": ([u"left", u"center"], [u"left", u"left", u"center"]),
        u"align-itm": ([u"left", u"right"], [u"left", u"left", u"right"]),
        u"width": ([28, 9], [4, 24, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    df_sorted = [df_data.sort_values(
        by=[key, header[idx]], ascending=[True, True]
        if key != header[idx] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[idx]], ascending=[False, True]
        if key != header[idx] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx]
    )

    fig = go.Figure()

    for table in df_sorted:
        columns = [table.get(col) for col in header]
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=columns,
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx]
                )
            )
        )

    buttons = list()
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
        visible[idx] = True
        buttons.append(
            dict(
                label=hdr.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visible}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.03,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                active=len(menu_items) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)

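# Minimal usage sketch (hypothetical data; the output path is chosen for the
# example only):
#
#   _tpc_generate_html_table(
#       header=[u"Test case", u"Delta [%]"],
#       data=[[u"ip4base", 1.5], [u"l2bdbase", -0.7]],
#       output_file_name=u"/tmp/example.html"
#   )
#
# This writes a standalone HTML page with one plotly Table trace per sort
# order and a "Sort by:" dropdown that switches between them.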

def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        rca_data = None
        rca = table.get(u"rca", None)
        if rca:
            try:
                with open(rca.get(u"data-file", ""), u"r") as rca_file:
                    rca_data = load(rca_file, Loader=FullLoader)
                header.insert(0, rca.get(u"title", "RCA"))
            except (YAMLError, IOError) as err:
                logging.warning(repr(err))

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]",
                u"Stdev of delta [%]"
            ]
        )
        header_str = u";".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data for the table:
    tbl_dict = dict()
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 src=tst_data,
                                 include_tests=table[u"include-tests"])

    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = (tst_data[u"result"][u"receive-rate"],
                                   tst_data[u"result"][u"receive-stdev"])
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        pass

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        if table[u"include-tests"] == u"MRR":
                            item.append(round(hist_data[0][0] / 1e6, 2))
                            item.append(round(hist_data[0][1] / 1e6, 2))
                        else:
                            item.append(round(mean(hist_data) / 1e6, 2))
                            item.append(round(stdev(hist_data) / 1e6, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 2))
            item.append(round(data_r_stdev / 1e6, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([u"Not tested", u"Not tested"])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 2))
            item.append(round(data_c_stdev / 1e6, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([u"Not tested", u"Not tested"])
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
            item.append(u"New in CSIT-2001")
        elif data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            )
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
        if rca_data:
            rca_nr = rca_data.get(item[0], u"-")
            item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        if (len(item) == len(header)) and (item[-4] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")

    if rca_data:
        footnote = rca_data.get(u"footnote", "")
        if footnote:
            with open(txt_file_name, u'a') as txt_file:
                txt_file.writelines(footnote)

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")

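# Illustrative sketch of one data line of the CSV written above (semicolon
# separated; the test name and all numbers are hypothetical). Without RCA and
# history columns the layout is: name, reference mean and stdev, compare mean
# and stdev in Mpps, then the delta and its stdev in percent:
#
#   x710-ethip4-ip4base-1c;12.34;0.05;12.9;0.04;5;0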

def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison_nic
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        rca_data = None
        rca = table.get(u"rca", None)
        if rca:
            try:
                with open(rca.get(u"data-file", ""), u"r") as rca_file:
                    rca_data = load(rca_file, Loader=FullLoader)
                header.insert(0, rca.get(u"title", "RCA"))
            except (YAMLError, IOError) as err:
                logging.warning(repr(err))

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]",
                u"Stdev of delta [%]"
            ]
        )
        header_str = u";".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data for the table:
    tbl_dict = dict()
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = (tst_data[u"result"][u"receive-rate"],
                                   tst_data[u"result"][u"receive-stdev"])
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        pass

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        if table[u"include-tests"] == u"MRR":
                            item.append(round(hist_data[0][0] / 1e6, 2))
                            item.append(round(hist_data[0][1] / 1e6, 2))
                        else:
                            item.append(round(mean(hist_data) / 1e6, 2))
                            item.append(round(stdev(hist_data) / 1e6, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 2))
            item.append(round(data_r_stdev / 1e6, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([u"Not tested", u"Not tested"])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 2))
            item.append(round(data_c_stdev / 1e6, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([u"Not tested", u"Not tested"])
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
            item.append(u"New in CSIT-2001")
        elif data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            )
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
        if rca_data:
            rca_nr = rca_data.get(item[0], u"-")
            item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        if (len(item) == len(header)) and (item[-4] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")

    if rca_data:
        footnote = rca_data.get(u"footnote", "")
        if footnote:
            with open(txt_file_name, u'a') as txt_file:
                txt_file.writelines(footnote)

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")


def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]",
                u"Stdev of delta [%]"
            ]
        )

    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data for the table:
1249     tbl_dict = dict()
1250     for job, builds in table[u"data"].items():
1251         for build in builds:
1252             for tst_name, tst_data in data[job][str(build)].items():
1253                 tst_name_mod = _tpc_modify_test_name(tst_name)
1254                 if tbl_dict.get(tst_name_mod, None) is None:
1255                     name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
1256                     tbl_dict[tst_name_mod] = {
1257                         u"name": name,
1258                         u"ref-data": list(),
1259                         u"cmp-data": list()
1260                     }
1261                 try:
1262                     if table[u"include-tests"] == u"MRR":
1263                         result = (tst_data[u"result"][u"receive-rate"],
1264                                   tst_data[u"result"][u"receive-stdev"])
1265                     elif table[u"include-tests"] == u"PDR":
1266                         result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1267                     elif table[u"include-tests"] == u"NDR":
1268                         result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1269                     else:
1270                         continue
1271
1272                     if result and \
1273                             table[u"reference"][u"nic"] in tst_data[u"tags"]:
1274                         tbl_dict[tst_name_mod][u"ref-data"].append(result)
1275                     elif result and \
1276                             table[u"compare"][u"nic"] in tst_data[u"tags"]:
1277                         tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1278                 except (TypeError, KeyError) as err:
1279                     logging.debug(f"No data for {tst_name}\n{repr(err)}")
1280                     # No data in output.xml for this test
1281
1282     tbl_lst = list()
1283     for tst_name in tbl_dict:
1284         item = [tbl_dict[tst_name][u"name"], ]
1285         data_r = tbl_dict[tst_name][u"ref-data"]
1286         if data_r:
1287             if table[u"include-tests"] == u"MRR":
1288                 data_r_mean = data_r[0][0]
1289                 data_r_stdev = data_r[0][1]
1290             else:
1291                 data_r_mean = mean(data_r)
1292                 data_r_stdev = stdev(data_r)
1293             item.append(round(data_r_mean / 1e6, 2))
1294             item.append(round(data_r_stdev / 1e6, 2))
1295         else:
1296             data_r_mean = None
1297             data_r_stdev = None
1298             item.extend([None, None])
1299         data_c = tbl_dict[tst_name][u"cmp-data"]
1300         if data_c:
1301             if table[u"include-tests"] == u"MRR":
1302                 data_c_mean = data_c[0][0]
1303                 data_c_stdev = data_c[0][1]
1304             else:
1305                 data_c_mean = mean(data_c)
1306                 data_c_stdev = stdev(data_c)
1307             item.append(round(data_c_mean / 1e6, 2))
1308             item.append(round(data_c_stdev / 1e6, 2))
1309         else:
1310             data_c_mean = None
1311             data_c_stdev = None
1312             item.extend([None, None])
1313         if data_r_mean is not None and data_c_mean is not None:
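            # relative_change_stdev() (from pal_utils) returns the relative
            # change of the compare mean against the reference mean in percent
            # and the stdev of that change derived from the two input stdevs.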
1314             delta, d_stdev = relative_change_stdev(
1315                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1316             )
1317             try:
1318                 item.append(round(delta))
1319             except ValueError:
1320                 item.append(delta)
1321             try:
1322                 item.append(round(d_stdev))
1323             except ValueError:
1324                 item.append(d_stdev)
1325             tbl_lst.append(item)
1326
1327     # Sort the table according to the relative change
1328     tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)
1329
1330     # Generate csv tables:
1331     with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1332         file_handler.write(u",".join(header) + u"\n")
1333         for test in tbl_lst:
1334             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1335
1336     convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1337                               f"{table[u'output-file']}.txt")
1338
1339     # Generate html table:
1340     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1341
1342
1343 def table_soak_vs_ndr(table, input_data):
1344     """Generate the table(s) with algorithm: table_soak_vs_ndr
1345     specified in the specification file.
1346
1347     :param table: Table to generate.
1348     :param input_data: Data to process.
1349     :type table: pandas.Series
1350     :type input_data: InputData
1351     """
1352
1353     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1354
1355     # Transform the data
1356     logging.info(
1357         f"    Creating the data set for the {table.get(u'type', u'')} "
1358         f"{table.get(u'title', u'')}."
1359     )
1360     data = input_data.filter_data(table, continue_on_error=True)
1361
1362     # Prepare the header of the table
1363     try:
1364         header = [
1365             u"Test case",
1366             f"{table[u'reference'][u'title']} Thput [Mpps]",
1367             f"{table[u'reference'][u'title']} Stdev [Mpps]",
1368             f"{table[u'compare'][u'title']} Thput [Mpps]",
1369             f"{table[u'compare'][u'title']} Stdev [Mpps]",
1370             u"Delta [%]",
1371             u"Stdev of delta [%]"
1372         ]
1373         header_str = u",".join(header) + u"\n"
1374     except (AttributeError, KeyError) as err:
1375         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1376         return
1377
1378     # Create a list of available SOAK test results:
1379     tbl_dict = dict()
1380     for job, builds in table[u"compare"][u"data"].items():
1381         for build in builds:
1382             for tst_name, tst_data in data[job][str(build)].items():
1383                 if tst_data[u"type"] == u"SOAK":
1384                     tst_name_mod = tst_name.replace(u"-soak", u"")
1385                     if tbl_dict.get(tst_name_mod, None) is None:
1386                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
1387                         nic = groups.group(0) if groups else u""
1388                         name = (
1389                             f"{nic}-"
1390                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1391                         )
1392                         tbl_dict[tst_name_mod] = {
1393                             u"name": name,
1394                             u"ref-data": list(),
1395                             u"cmp-data": list()
1396                         }
1397                     try:
1398                         tbl_dict[tst_name_mod][u"cmp-data"].append(
1399                             tst_data[u"throughput"][u"LOWER"])
1400                     except (KeyError, TypeError):
1401                         pass
1402     tests_lst = tbl_dict.keys()
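    # Only the SOAK tests collected above are matched against the reference
    # (NDR/PDR/MRR) results below.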
1403
1404     # Add corresponding NDR test results:
1405     for job, builds in table[u"reference"][u"data"].items():
1406         for build in builds:
1407             for tst_name, tst_data in data[job][str(build)].items():
1408                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1409                     replace(u"-mrr", u"")
1410                 if tst_name_mod not in tests_lst:
1411                     continue
1412                 try:
1413                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1414                         continue
1415                     if table[u"include-tests"] == u"MRR":
1416                         result = (tst_data[u"result"][u"receive-rate"],
1417                                   tst_data[u"result"][u"receive-stdev"])
1418                     elif table[u"include-tests"] == u"PDR":
1419                         result = \
1420                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
1421                     elif table[u"include-tests"] == u"NDR":
1422                         result = \
1423                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
1424                     else:
1425                         result = None
1426                     if result is not None:
1427                         tbl_dict[tst_name_mod][u"ref-data"].append(
1428                             result)
1429                 except (KeyError, TypeError):
1430                     continue
1431
1432     tbl_lst = list()
1433     for tst_name in tbl_dict:
1434         item = [tbl_dict[tst_name][u"name"], ]
1435         data_r = tbl_dict[tst_name][u"ref-data"]
1436         if data_r:
1437             if table[u"include-tests"] == u"MRR":
1438                 data_r_mean = data_r[0][0]
1439                 data_r_stdev = data_r[0][1]
1440             else:
1441                 data_r_mean = mean(data_r)
1442                 data_r_stdev = stdev(data_r)
1443             item.append(round(data_r_mean / 1e6, 2))
1444             item.append(round(data_r_stdev / 1e6, 2))
1445         else:
1446             data_r_mean = None
1447             data_r_stdev = None
1448             item.extend([None, None])
1449         data_c = tbl_dict[tst_name][u"cmp-data"]
1450         if data_c:
1451             if table[u"include-tests"] == u"MRR":
1452                 data_c_mean = data_c[0][0]
1453                 data_c_stdev = data_c[0][1]
1454             else:
1455                 data_c_mean = mean(data_c)
1456                 data_c_stdev = stdev(data_c)
1457             item.append(round(data_c_mean / 1e6, 2))
1458             item.append(round(data_c_stdev / 1e6, 2))
1459         else:
1460             data_c_mean = None
1461             data_c_stdev = None
1462             item.extend([None, None])
1463         if data_r_mean is not None and data_c_mean is not None:
1464             delta, d_stdev = relative_change_stdev(
1465                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1466             try:
1467                 item.append(round(delta))
1468             except ValueError:
1469                 item.append(delta)
1470             try:
1471                 item.append(round(d_stdev))
1472             except ValueError:
1473                 item.append(d_stdev)
1474             tbl_lst.append(item)
1475
1476     # Sort the table according to the relative change
1477     tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)
1478
1479     # Generate csv tables:
1480     csv_file = f"{table[u'output-file']}.csv"
1481     with open(csv_file, u"wt") as file_handler:
1482         file_handler.write(header_str)
1483         for test in tbl_lst:
1484             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1485
1486     convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")
1487
1488     # Generate html table:
1489     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1490
1491
1492 def table_perf_trending_dash(table, input_data):
1493     """Generate the table(s) with algorithm:
1494     table_perf_trending_dash
1495     specified in the specification file.
1496
1497     :param table: Table to generate.
1498     :param input_data: Data to process.
1499     :type table: pandas.Series
1500     :type input_data: InputData
1501     """
1502
1503     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1504
1505     # Transform the data
1506     logging.info(
1507         f"    Creating the data set for the {table.get(u'type', u'')} "
1508         f"{table.get(u'title', u'')}."
1509     )
1510     data = input_data.filter_data(table, continue_on_error=True)
1511
1512     # Prepare the header of the tables
1513     header = [
1514         u"Test Case",
1515         u"Trend [Mpps]",
1516         u"Short-Term Change [%]",
1517         u"Long-Term Change [%]",
1518         u"Regressions [#]",
1519         u"Progressions [#]"
1520     ]
1521     header_str = u",".join(header) + u"\n"
1522
1523     # Prepare the data for the table:
1524     tbl_dict = dict()
1525     for job, builds in table[u"data"].items():
1526         for build in builds:
1527             for tst_name, tst_data in data[job][str(build)].items():
1528                 if tst_name.lower() in table.get(u"ignore-list", list()):
1529                     continue
1530                 if tbl_dict.get(tst_name, None) is None:
1531                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1532                     if not groups:
1533                         continue
1534                     nic = groups.group(0)
1535                     tbl_dict[tst_name] = {
1536                         u"name": f"{nic}-{tst_data[u'name']}",
1537                         u"data": OrderedDict()
1538                     }
1539                 try:
1540                     tbl_dict[tst_name][u"data"][str(build)] = \
1541                         tst_data[u"result"][u"receive-rate"]
1542                 except (TypeError, KeyError):
1543                     pass  # No data in output.xml for this test
1544
1545     tbl_lst = list()
1546     for tst_name in tbl_dict:
1547         data_t = tbl_dict[tst_name][u"data"]
1548         if len(data_t) < 2:
1549             continue
1550
1551         classification_lst, avgs = classify_anomalies(data_t)
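        # classify_anomalies() (from pal_utils) returns one classification per
        # sample (e.g. u"regression", u"progression", u"normal") and the list
        # of trend averages used for the change calculations below.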
1552
1553         win_size = min(len(data_t), table[u"window"])
1554         long_win_size = min(len(data_t), table[u"long-trend-window"])
1555
1556         try:
1557             max_long_avg = max(
1558                 [x for x in avgs[-long_win_size:-win_size]
1559                  if not isnan(x)])
1560         except ValueError:
1561             max_long_avg = nan
1562         last_avg = avgs[-1]
1563         avg_week_ago = avgs[max(-win_size, -len(avgs))]
1564
1565         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1566             rel_change_last = nan
1567         else:
1568             rel_change_last = round(
1569                 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1570
1571         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1572             rel_change_long = nan
1573         else:
1574             rel_change_long = round(
1575                 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1576
1577         if classification_lst:
1578             if isnan(rel_change_last) and isnan(rel_change_long):
1579                 continue
1580             if isnan(last_avg) or isnan(rel_change_last) or \
1581                     isnan(rel_change_long):
1582                 continue
1583             tbl_lst.append(
1584                 [tbl_dict[tst_name][u"name"],
1585                  round(last_avg / 1000000, 2),
1586                  rel_change_last,
1587                  rel_change_long,
1588                  classification_lst[-win_size:].count(u"regression"),
1589                  classification_lst[-win_size:].count(u"progression")])
1590
1591     tbl_lst.sort(key=lambda rel: rel[0])
1592
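    # Order the dashboard: the highest number of regressions first, then the
    # highest number of progressions, and within each such group by the
    # short-term change in ascending order; the alphabetical sort above keeps
    # ties in a stable order.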
1593     tbl_sorted = list()
1594     for nrr in range(table[u"window"], -1, -1):
1595         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1596         for nrp in range(table[u"window"], -1, -1):
1597             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1598             tbl_out.sort(key=lambda rel: rel[2])
1599             tbl_sorted.extend(tbl_out)
1600
1601     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1602
1603     logging.info(f"    Writing file: {file_name}")
1604     with open(file_name, u"wt") as file_handler:
1605         file_handler.write(header_str)
1606         for test in tbl_sorted:
1607             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1608
1609     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1610     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1611
1612
1613 def _generate_url(testbed, test_name):
1614     """Generate URL to a trending plot from the name of the test case.
1615
1616     :param testbed: The testbed used for testing.
1617     :param test_name: The name of the test case.
1618     :type testbed: str
1619     :type test_name: str
1620     :returns: The URL to the plot with the trending data for the given test
1621         case.
1622     :rtype: str
1623     """
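    # Illustrative example (not taken from real data): for testbed u"2n-skx"
    # and a test name such as
    # u"xxv710-64b-2t1c-avf-eth-l2bdbasemaclrn-ndrpdr", the function returns
    # u"l2-2n-skx-xxv710.html#64b-2t1c-base-avf".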
1624
1625     if u"x520" in test_name:
1626         nic = u"x520"
1627     elif u"x710" in test_name:
1628         nic = u"x710"
1629     elif u"xl710" in test_name:
1630         nic = u"xl710"
1631     elif u"xxv710" in test_name:
1632         nic = u"xxv710"
1633     elif u"vic1227" in test_name:
1634         nic = u"vic1227"
1635     elif u"vic1385" in test_name:
1636         nic = u"vic1385"
1637     elif u"x553" in test_name:
1638         nic = u"x553"
1639     elif u"cx556" in test_name or u"cx556a" in test_name:
1640         nic = u"cx556a"
1641     else:
1642         nic = u""
1643
1644     if u"64b" in test_name:
1645         frame_size = u"64b"
1646     elif u"78b" in test_name:
1647         frame_size = u"78b"
1648     elif u"imix" in test_name:
1649         frame_size = u"imix"
1650     elif u"9000b" in test_name:
1651         frame_size = u"9000b"
1652     elif u"1518b" in test_name:
1653         frame_size = u"1518b"
1654     elif u"114b" in test_name:
1655         frame_size = u"114b"
1656     else:
1657         frame_size = u""
1658
1659     if u"1t1c" in test_name or \
1660         (u"-1c-" in test_name and
1661          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1662         cores = u"1t1c"
1663     elif u"2t2c" in test_name or \
1664          (u"-2c-" in test_name and
1665           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1666         cores = u"2t2c"
1667     elif u"4t4c" in test_name or \
1668          (u"-4c-" in test_name and
1669           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1670         cores = u"4t4c"
1671     elif u"2t1c" in test_name or \
1672          (u"-1c-" in test_name and
1673           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1674         cores = u"2t1c"
1675     elif u"4t2c" in test_name or \
1676          (u"-2c-" in test_name and
1677           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1678         cores = u"4t2c"
1679     elif u"8t4c" in test_name or \
1680          (u"-4c-" in test_name and
1681           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1682         cores = u"8t4c"
1683     else:
1684         cores = u""
1685
1686     if u"testpmd" in test_name:
1687         driver = u"testpmd"
1688     elif u"l3fwd" in test_name:
1689         driver = u"l3fwd"
1690     elif u"avf" in test_name:
1691         driver = u"avf"
1692     elif u"rdma" in test_name:
1693         driver = u"rdma"
1694     elif u"dnv" in testbed or u"tsh" in testbed:
1695         driver = u"ixgbe"
1696     else:
1697         driver = u"dpdk"
1698
1699     if u"acl" in test_name or \
1700             u"macip" in test_name or \
1701             u"nat" in test_name or \
1702             u"policer" in test_name or \
1703             u"cop" in test_name:
1704         bsf = u"features"
1705     elif u"scale" in test_name:
1706         bsf = u"scale"
1707     elif u"base" in test_name:
1708         bsf = u"base"
1709     else:
1710         bsf = u"base"
1711
1712     if u"114b" in test_name and u"vhost" in test_name:
1713         domain = u"vts"
1714     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1715         domain = u"dpdk"
1716     elif u"memif" in test_name:
1717         domain = u"container_memif"
1718     elif u"srv6" in test_name:
1719         domain = u"srv6"
1720     elif u"vhost" in test_name:
1721         domain = u"vhost"
1722         if u"vppl2xc" in test_name:
1723             driver += u"-vpp"
1724         else:
1725             driver += u"-testpmd"
1726         if u"lbvpplacp" in test_name:
1727             bsf += u"-link-bonding"
1728     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1729         domain = u"nf_service_density_vnfc"
1730     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1731         domain = u"nf_service_density_cnfc"
1732     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1733         domain = u"nf_service_density_cnfp"
1734     elif u"ipsec" in test_name:
1735         domain = u"ipsec"
1736         if u"sw" in test_name:
1737             bsf += u"-sw"
1738         elif u"hw" in test_name:
1739             bsf += u"-hw"
1740     elif u"ethip4vxlan" in test_name:
1741         domain = u"ip4_tunnels"
1742     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1743         domain = u"ip4"
1744     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1745         domain = u"ip6"
1746     elif u"l2xcbase" in test_name or \
1747             u"l2xcscale" in test_name or \
1748             u"l2bdbasemaclrn" in test_name or \
1749             u"l2bdscale" in test_name or \
1750             u"l2patch" in test_name:
1751         domain = u"l2"
1752     else:
1753         domain = u""
1754
1755     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1756     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1757
1758     return file_name + anchor_name
1759
1760
1761 def table_perf_trending_dash_html(table, input_data):
1762     """Generate the table(s) with algorithm:
1763     table_perf_trending_dash_html specified in the specification
1764     file.
1765
1766     :param table: Table to generate.
1767     :param input_data: Data to process.
1768     :type table: dict
1769     :type input_data: InputData
1770     """
1771
1772     _ = input_data
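    # input_data is unused on purpose: the dashboard is rebuilt from the CSV
    # file referenced by table[u"input-file"], which is expected to be the
    # output of table_perf_trending_dash.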
1773
1774     if not table.get(u"testbed", None):
1775         logging.error(
1776             f"The testbed is not defined for the table "
1777             f"{table.get(u'title', u'')}."
1778         )
1779         return
1780
1781     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1782
1783     try:
1784         with open(table[u"input-file"], u'rt') as csv_file:
1785             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1786     except KeyError:
1787         logging.warning(u"The input file is not defined.")
1788         return
1789     except csv.Error as err:
1790         logging.warning(
1791             f"Unable to process the file {table[u'input-file']}.\n"
1792             f"{repr(err)}"
1793         )
1794         return
1795
1796     # Table:
1797     dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1798
1799     # Table header:
1800     trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1801     for idx, item in enumerate(csv_lst[0]):
1802         alignment = u"left" if idx == 0 else u"center"
1803         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
1804         thead.text = item
1805
1806     # Rows:
1807     colors = {
1808         u"regression": (
1809             u"#ffcccc",
1810             u"#ff9999"
1811         ),
1812         u"progression": (
1813             u"#c6ecc6",
1814             u"#9fdf9f"
1815         ),
1816         u"normal": (
1817             u"#e9f1fb",
1818             u"#d4e4f7"
1819         )
1820     }
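    # The two shades defined for each classification alternate with the row
    # index, producing a striped table.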
1821     for r_idx, row in enumerate(csv_lst[1:]):
1822         if int(row[4]):
1823             color = u"regression"
1824         elif int(row[5]):
1825             color = u"progression"
1826         else:
1827             color = u"normal"
1828         trow = ET.SubElement(
1829             dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1830         )
1831
1832         # Columns:
1833         for c_idx, item in enumerate(row):
1834             tdata = ET.SubElement(
1835                 trow,
1836                 u"td",
1837                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
1838             )
1839             # Name:
1840             if c_idx == 0:
1841                 ref = ET.SubElement(
1842                     tdata,
1843                     u"a",
1844                     attrib=dict(
1845                         href=f"../trending/"
1846                              f"{_generate_url(table.get(u'testbed', ''), item)}"
1847                     )
1848                 )
1849                 ref.text = item
1850             else:
1851                 tdata.text = item
1852     try:
1853         with open(table[u"output-file"], u'w') as html_file:
1854             logging.info(f"    Writing file: {table[u'output-file']}")
1855             html_file.write(u".. raw:: html\n\n\t")
1856             html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1857             html_file.write(u"\n\t<p><br><br></p>\n")
1858     except KeyError:
1859         logging.warning(u"The output file is not defined.")
1860         return
1861
1862
1863 def table_last_failed_tests(table, input_data):
1864     """Generate the table(s) with algorithm: table_last_failed_tests
1865     specified in the specification file.
1866
1867     :param table: Table to generate.
1868     :param input_data: Data to process.
1869     :type table: pandas.Series
1870     :type input_data: InputData
1871     """
1872
1873     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1874
1875     # Transform the data
1876     logging.info(
1877         f"    Creating the data set for the {table.get(u'type', u'')} "
1878         f"{table.get(u'title', u'')}."
1879     )
1880
1881     data = input_data.filter_data(table, continue_on_error=True)
1882
1883     if data is None or data.empty:
1884         logging.warning(
1885             f"    No data for the {table.get(u'type', u'')} "
1886             f"{table.get(u'title', u'')}."
1887         )
1888         return
1889
1890     tbl_list = list()
1891     for job, builds in table[u"data"].items():
1892         for build in builds:
1893             build = str(build)
1894             try:
1895                 version = input_data.metadata(job, build).get(u"version", u"")
1896             except KeyError:
1897                 logging.error(f"Data for {job}: {build} is not present.")
1898                 return
1899             tbl_list.append(build)
1900             tbl_list.append(version)
1901             failed_tests = list()
1902             passed = 0
1903             failed = 0
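            # data[job][build] behaves like a pandas Series here, so iterating
            # over its .values attribute yields the per-test data directly.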
1904             for tst_data in data[job][build].values:
1905                 if tst_data[u"status"] != u"FAIL":
1906                     passed += 1
1907                     continue
1908                 failed += 1
1909                 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1910                 if not groups:
1911                     continue
1912                 nic = groups.group(0)
1913                 failed_tests.append(f"{nic}-{tst_data[u'name']}")
1914             tbl_list.append(str(passed))
1915             tbl_list.append(str(failed))
1916             tbl_list.extend(failed_tests)
1917
1918     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1919     logging.info(f"    Writing file: {file_name}")
1920     with open(file_name, u"wt") as file_handler:
1921         for test in tbl_list:
1922             file_handler.write(test + u'\n')
1923
1924
1925 def table_failed_tests(table, input_data):
1926     """Generate the table(s) with algorithm: table_failed_tests
1927     specified in the specification file.
1928
1929     :param table: Table to generate.
1930     :param input_data: Data to process.
1931     :type table: pandas.Series
1932     :type input_data: InputData
1933     """
1934
1935     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1936
1937     # Transform the data
1938     logging.info(
1939         f"    Creating the data set for the {table.get(u'type', u'')} "
1940         f"{table.get(u'title', u'')}."
1941     )
1942     data = input_data.filter_data(table, continue_on_error=True)
1943
1944     # Prepare the header of the tables
1945     header = [
1946         u"Test Case",
1947         u"Failures [#]",
1948         u"Last Failure [Time]",
1949         u"Last Failure [VPP-Build-Id]",
1950         u"Last Failure [CSIT-Job-Build-Id]"
1951     ]
1952
1953     # Generate the data for the table according to the model in the table
1954     # specification
1955
1956     now = dt.utcnow()
1957     timeperiod = timedelta(int(table.get(u"window", 7)))
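    # timedelta() interprets its first positional argument as days, so the
    # window is a number of days (7 by default).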
1958
1959     tbl_dict = dict()
1960     for job, builds in table[u"data"].items():
1961         for build in builds:
1962             build = str(build)
1963             for tst_name, tst_data in data[job][build].items():
1964                 if tst_name.lower() in table.get(u"ignore-list", list()):
1965                     continue
1966                 if tbl_dict.get(tst_name, None) is None:
1967                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1968                     if not groups:
1969                         continue
1970                     nic = groups.group(0)
1971                     tbl_dict[tst_name] = {
1972                         u"name": f"{nic}-{tst_data[u'name']}",
1973                         u"data": OrderedDict()
1974                     }
1975                 try:
1976                     generated = input_data.metadata(job, build).\
1977                         get(u"generated", u"")
1978                     if not generated:
1979                         continue
1980                     then = dt.strptime(generated, u"%Y%m%d %H:%M")
1981                     if (now - then) <= timeperiod:
1982                         tbl_dict[tst_name][u"data"][build] = (
1983                             tst_data[u"status"],
1984                             generated,
1985                             input_data.metadata(job, build).get(u"version",
1986                                                                 u""),
1987                             build
1988                         )
1989                 except (TypeError, KeyError) as err:
1990                     logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
1991
1992     max_fails = 0
1993     tbl_lst = list()
1994     for tst_data in tbl_dict.values():
1995         fails_nr = 0
1996         fails_last_date = u""
1997         fails_last_vpp = u""
1998         fails_last_csit = u""
1999         for val in tst_data[u"data"].values():
2000             if val[0] == u"FAIL":
2001                 fails_nr += 1
2002                 fails_last_date = val[1]
2003                 fails_last_vpp = val[2]
2004                 fails_last_csit = val[3]
2005         if fails_nr:
2006             max_fails = max(max_fails, fails_nr)
2007             tbl_lst.append(
2008                 [
2009                     tst_data[u"name"],
2010                     fails_nr,
2011                     fails_last_date,
2012                     fails_last_vpp,
2013                     f"mrr-daily-build-{fails_last_csit}"
2014                 ]
2015             )
2016
2017     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
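    # Group the rows by the number of failures, highest first; the sort above
    # (by the date of the last failure) is preserved within each group.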
2018     tbl_sorted = list()
2019     for nrf in range(max_fails, -1, -1):
2020         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
2021         tbl_sorted.extend(tbl_fails)
2022
2023     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2024     logging.info(f"    Writing file: {file_name}")
2025     with open(file_name, u"wt") as file_handler:
2026         file_handler.write(u",".join(header) + u"\n")
2027         for test in tbl_sorted:
2028             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
2029
2030     logging.info(f"    Writing file: {table[u'output-file']}.txt")
2031     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
2032
2033
2034 def table_failed_tests_html(table, input_data):
2035     """Generate the table(s) with algorithm: table_failed_tests_html
2036     specified in the specification file.
2037
2038     :param table: Table to generate.
2039     :param input_data: Data to process.
2040     :type table: pandas.Series
2041     :type input_data: InputData
2042     """
2043
2044     _ = input_data
2045
2046     if not table.get(u"testbed", None):
2047         logging.error(
2048             f"The testbed is not defined for the table "
2049             f"{table.get(u'title', u'')}."
2050         )
2051         return
2052
2053     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2054
2055     try:
2056         with open(table[u"input-file"], u'rt') as csv_file:
2057             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2058     except KeyError:
2059         logging.warning(u"The input file is not defined.")
2060         return
2061     except csv.Error as err:
2062         logging.warning(
2063             f"Unable to process the file {table[u'input-file']}.\n"
2064             f"{repr(err)}"
2065         )
2066         return
2067
2068     # Table:
2069     failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2070
2071     # Table header:
2072     trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2073     for idx, item in enumerate(csv_lst[0]):
2074         alignment = u"left" if idx == 0 else u"center"
2075         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
2076         thead.text = item
2077
2078     # Rows:
2079     colors = (u"#e9f1fb", u"#d4e4f7")
2080     for r_idx, row in enumerate(csv_lst[1:]):
2081         background = colors[r_idx % 2]
2082         trow = ET.SubElement(
2083             failed_tests, u"tr", attrib=dict(bgcolor=background)
2084         )
2085
2086         # Columns:
2087         for c_idx, item in enumerate(row):
2088             tdata = ET.SubElement(
2089                 trow,
2090                 u"td",
2091                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
2092             )
2093             # Name:
2094             if c_idx == 0:
2095                 ref = ET.SubElement(
2096                     tdata,
2097                     u"a",
2098                     attrib=dict(
2099                         href=f"../trending/"
2100                              f"{_generate_url(table.get(u'testbed', ''), item)}"
2101                     )
2102                 )
2103                 ref.text = item
2104             else:
2105                 tdata.text = item
2106     try:
2107         with open(table[u"output-file"], u'w') as html_file:
2108             logging.info(f"    Writing file: {table[u'output-file']}")
2109             html_file.write(u".. raw:: html\n\n\t")
2110             html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2111             html_file.write(u"\n\t<p><br><br></p>\n")
2112     except KeyError:
2113         logging.warning(u"The output file is not defined.")
2114         return