Trending: Reorganization
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34     convert_csv_to_pretty_txt, relative_change_stdev
35
36
37 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
38
39
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table specification names an algorithm; the matching generator
    function is looked up in the dispatch dict and called. A table whose
    algorithm is unknown is logged and skipped so the remaining tables
    are still generated.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    generator = {
        u"table_details": table_details,
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        # An unknown algorithm raises KeyError from the dict lookup, which
        # the NameError handler below would never catch and would abort the
        # whole loop. Check membership explicitly instead.
        generate = generator.get(table[u"algorithm"], None)
        if generate is None:
            logging.error(
                f"Algorithm {table[u'algorithm']} is not defined."
            )
            continue
        try:
            generate(table, data)
        except NameError as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
73
74
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    Writes one CSV file per suite found in the first build of the first job
    listed in table[u"data"].

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = list()
    for column in table[u"columns"]:
        # CSV-escape embedded double quotes in the column title.
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    # Generate the data for the table according to the model in the table
    # specification
    # Only the first job and its first build serve as the data source.
    job = list(table[u"data"].keys())[0]
    build = str(table[u"data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error(
            u"    No data available. The table will not be generated."
        )
        return

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data[job][build].keys():
            # Keep only tests belonging to this suite.
            if data[job][build][test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column[u"data"] is a two-word spec; the second word is
                    # the key of the test-data field to display.
                    col_data = str(data[job][build][test][column[
                        u"data"].split(" ")[1]]).replace(u'"', u'""')
                    if column[u"data"].split(u" ")[1] in \
                        (u"conf-history", u"show-run"):
                        # NOTE(review): removes ALL u" |br| " markers here,
                        # while table_merged_details removes only the first
                        # occurrence -- confirm which is intended.
                        col_data = col_data.replace(u" |br| ", u"", )
                        # [:-5] drops the trailing 5 characters -- presumably
                        # a dangling line-break marker; TODO confirm.
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u"No data")
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = (
                f"{table[u'output-file']}_{suite_name}"
                f"{table[u'output-file-ext']}"
            )
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"w") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
147
148
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Like table_details, but the data of all jobs/builds is merged into a
    single data set before the per-suite CSV files are written.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table[u"columns"]:
        # CSV-escape embedded double quotes in the column title.
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            # Keep only tests belonging to this suite.
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column[u"data"] is a two-word spec; the second word is
                    # the key of the test-data field to display.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in \
                        (u"conf-history", u"show-run"):
                        # NOTE(review): removes only the FIRST u" |br| "
                        # marker, unlike table_details which removes all --
                        # confirm which is intended.
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        # [:-5] drops the trailing 5 characters -- presumably
                        # a dangling line-break marker; TODO confirm.
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = (
                f"{table[u'output-file']}_{suite_name}"
                f"{table[u'output-file-ext']}"
            )
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"w") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
221
222
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the test-type suffix, normalizes the thread/core tag to the core
    count only, and removes the NIC code from the name.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Order matters: longer suffixes must be replaced before their prefixes
    # (e.g. u"-ndrpdrdisc" before u"-ndrpdr" before u"-ndr").
    replacements = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c")
    )
    modified = test_name
    for old, new in replacements:
        modified = modified.replace(old, new)

    # Finally drop the NIC code (e.g. u"10ge2p1x520").
    return re.sub(REGEX_NIC, u"", modified)
246
247
248 def _tpc_modify_displayed_test_name(test_name):
249     """Modify a test name which is displayed in a table by replacing its parts.
250
251     :param test_name: Test name to be modified.
252     :type test_name: str
253     :returns: Modified test name.
254     :rtype: str
255     """
256     return test_name.\
257         replace(u"1t1c", u"1c").\
258         replace(u"2t1c", u"1c"). \
259         replace(u"2t2c", u"2c").\
260         replace(u"4t2c", u"2c"). \
261         replace(u"4t4c", u"4c").\
262         replace(u"8t4c", u"4c")
263
264
265 def _tpc_insert_data(target, src, include_tests):
266     """Insert src data to the target structure.
267
268     :param target: Target structure where the data is placed.
269     :param src: Source data to be placed into the target stucture.
270     :param include_tests: Which results will be included (MRR, NDR, PDR).
271     :type target: list
272     :type src: dict
273     :type include_tests: str
274     """
275     try:
276         if include_tests == u"MRR":
277             target.append(src[u"result"][u"receive-rate"])
278         elif include_tests == u"PDR":
279             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
280         elif include_tests == u"NDR":
281             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
282     except (KeyError, TypeError):
283         pass
284
285
286 def _tpc_sort_table(table):
287     """Sort the table this way:
288
289     1. Put "New in CSIT-XXXX" at the first place.
290     2. Put "See footnote" at the second place.
291     3. Sort the rest by "Delta".
292
293     :param table: Table to sort.
294     :type table: list
295     :returns: Sorted table.
296     :rtype: list
297     """
298
299
300     tbl_new = list()
301     tbl_see = list()
302     tbl_delta = list()
303     for item in table:
304         if isinstance(item[-1], str):
305             if u"New in CSIT" in item[-1]:
306                 tbl_new.append(item)
307             elif u"See footnote" in item[-1]:
308                 tbl_see.append(item)
309         else:
310             tbl_delta.append(item)
311
312     # Sort the tables:
313     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
314     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
315     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
316     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
317
318     # Put the tables together:
319     table = list()
320     table.extend(tbl_new)
321     table.extend(tbl_see)
322     table.extend(tbl_delta)
323
324     return table
325
326
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    One go.Table trace is created per column per sort direction; a dropdown
    menu toggles which single trace is visible, giving client-side sorting.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    df_data = pd.DataFrame(data, columns=header)

    # One sorted DataFrame per column, ascending; the first column (test
    # name) serves as the secondary key, except when it is itself the
    # primary key.
    df_sorted = [df_data.sort_values(
        by=[key, header[0]], ascending=[True, True]
        if key != header[0] else [False, True]) for key in header]
    # The same set of views sorted in the opposite direction; the order of
    # df_sorted must match the order of menu_items built below.
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[0]], ascending=[False, True]
        if key != header[0] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Alternating row background colours for readability.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]
    )

    fig = go.Figure()

    # Add one table trace per sorted view.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
        fig.add_trace(
            go.Table(
                columnwidth=[30, 10],
                header=table_header,
                cells=dict(
                    values=columns,
                    fill_color=fill_color,
                    align=[u"left", u"right"]
                )
            )
        )

    # Build the dropdown: menu item at index idx makes only trace idx
    # visible, so menu order must mirror trace order.
    buttons = list()
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
        visible[idx] = True
        buttons.append(
            dict(
                label=hdr.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visible}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.03,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                # Pre-select the last menu item (last column, descending).
                active=len(menu_items) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    # Write the figure as a standalone html file.
    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
418
419
def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    Compares reference and compare data sets (optionally with historical
    columns and a compare-data replacement) and writes the result as csv,
    pretty txt and sortable html files.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        # Two columns (mean, stdev) per historical data set.
        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    # tbl_dict maps the normalized test name to its display name and the
    # collected reference / compare (and optionally history) samples.
    tbl_dict = dict()
    topo = ""
    for job, builds in table[u"reference"][u"data"].items():
        # topo keeps the value from the LAST job iterated; used below for
        # the 2n-skx dot1q footnote.
        topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name: NIC code + test name without the
                    # trailing (test type) part.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 src=tst_data,
                                 include_tests=table[u"include-tests"])

    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement for the compare data: the first replacement
    # sample found discards the compare data collected above.
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        # Reset the compare data once, before the first
                        # replacement sample is inserted.
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect historical samples; only for tests already present in
    # tbl_dict (i.e. seen in reference or compare data).
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        # Missing or malformed samples are skipped.
                        pass

    # Build the table rows. Row layout (after this loop):
    # name, [hist mean, hist stdev]..., ref mean, ref stdev,
    # cmp mean, cmp stdev, delta/remark.
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        # Values converted from pps to Mpps.
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        # At this point item[-2] is the cmp mean and item[-4] the ref mean.
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-1908")
        elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
            item.append(u"See footnote [1]")
            footnote = True
        elif item[-4] != 0:
            # Delta truncated to whole percent.
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Rows without a delta/remark (wrong length) or not tested in the
        # compare set are dropped.
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    if footnote:
        # Appended only when at least one row references footnote [1].
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
649
650
651 def table_perf_comparison_nic(table, input_data):
652     """Generate the table(s) with algorithm: table_perf_comparison
653     specified in the specification file.
654
655     :param table: Table to generate.
656     :param input_data: Data to process.
657     :type table: pandas.Series
658     :type input_data: InputData
659     """
660
661     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
662
663     # Transform the data
664     logging.info(
665         f"    Creating the data set for the {table.get(u'type', u'')} "
666         f"{table.get(u'title', u'')}."
667     )
668     data = input_data.filter_data(table, continue_on_error=True)
669
670     # Prepare the header of the tables
671     try:
672         header = [u"Test case", ]
673
674         if table[u"include-tests"] == u"MRR":
675             hdr_param = u"Rec Rate"
676         else:
677             hdr_param = u"Thput"
678
679         history = table.get(u"history", list())
680         for item in history:
681             header.extend(
682                 [
683                     f"{item[u'title']} {hdr_param} [Mpps]",
684                     f"{item[u'title']} Stdev [Mpps]"
685                 ]
686             )
687         header.extend(
688             [
689                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
690                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
691                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
692                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
693                 u"Delta [%]"
694             ]
695         )
696         header_str = u",".join(header) + u"\n"
697     except (AttributeError, KeyError) as err:
698         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
699         return
700
701     # Prepare data to the table:
702     tbl_dict = dict()
703     topo = u""
704     for job, builds in table[u"reference"][u"data"].items():
705         topo = u"2n-skx" if u"2n-skx" in job else u""
706         for build in builds:
707             for tst_name, tst_data in data[job][str(build)].items():
708                 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
709                     continue
710                 tst_name_mod = _tpc_modify_test_name(tst_name)
711                 if u"across topologies" in table[u"title"].lower():
712                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
713                 if tbl_dict.get(tst_name_mod, None) is None:
714                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
715                     if u"across testbeds" in table[u"title"].lower() or \
716                             u"across topologies" in table[u"title"].lower():
717                         name = _tpc_modify_displayed_test_name(name)
718                     tbl_dict[tst_name_mod] = {
719                         u"name": name,
720                         u"ref-data": list(),
721                         u"cmp-data": list()
722                     }
723                 _tpc_insert_data(
724                     target=tbl_dict[tst_name_mod][u"ref-data"],
725                     src=tst_data,
726                     include_tests=table[u"include-tests"]
727                 )
728
729     for job, builds in table[u"compare"][u"data"].items():
730         for build in builds:
731             for tst_name, tst_data in data[job][str(build)].items():
732                 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
733                     continue
734                 tst_name_mod = _tpc_modify_test_name(tst_name)
735                 if u"across topologies" in table[u"title"].lower():
736                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
737                 if tbl_dict.get(tst_name_mod, None) is None:
738                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
739                     if u"across testbeds" in table[u"title"].lower() or \
740                             u"across topologies" in table[u"title"].lower():
741                         name = _tpc_modify_displayed_test_name(name)
742                     tbl_dict[tst_name_mod] = {
743                         u"name": name,
744                         u"ref-data": list(),
745                         u"cmp-data": list()
746                     }
747                 _tpc_insert_data(
748                     target=tbl_dict[tst_name_mod][u"cmp-data"],
749                     src=tst_data,
750                     include_tests=table[u"include-tests"]
751                 )
752
753     replacement = table[u"compare"].get(u"data-replacement", None)
754     if replacement:
755         create_new_list = True
756         rpl_data = input_data.filter_data(
757             table, data=replacement, continue_on_error=True)
758         for job, builds in replacement.items():
759             for build in builds:
760                 for tst_name, tst_data in rpl_data[job][str(build)].items():
761                     if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
762                         continue
763                     tst_name_mod = _tpc_modify_test_name(tst_name)
764                     if u"across topologies" in table[u"title"].lower():
765                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
766                     if tbl_dict.get(tst_name_mod, None) is None:
767                         name = \
768                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
769                         if u"across testbeds" in table[u"title"].lower() or \
770                                 u"across topologies" in table[u"title"].lower():
771                             name = _tpc_modify_displayed_test_name(name)
772                         tbl_dict[tst_name_mod] = {
773                             u"name": name,
774                             u"ref-data": list(),
775                             u"cmp-data": list()
776                         }
777                     if create_new_list:
778                         create_new_list = False
779                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
780
781                     _tpc_insert_data(
782                         target=tbl_dict[tst_name_mod][u"cmp-data"],
783                         src=tst_data,
784                         include_tests=table[u"include-tests"]
785                     )
786
787     for item in history:
788         for job, builds in item[u"data"].items():
789             for build in builds:
790                 for tst_name, tst_data in data[job][str(build)].items():
791                     if item[u"nic"] not in tst_data[u"tags"]:
792                         continue
793                     tst_name_mod = _tpc_modify_test_name(tst_name)
794                     if u"across topologies" in table[u"title"].lower():
795                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
796                     if tbl_dict.get(tst_name_mod, None) is None:
797                         continue
798                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
799                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
800                     if tbl_dict[tst_name_mod][u"history"].\
801                             get(item[u"title"], None) is None:
802                         tbl_dict[tst_name_mod][u"history"][item[
803                             u"title"]] = list()
804                     try:
805                         if table[u"include-tests"] == u"MRR":
806                             res = tst_data[u"result"][u"receive-rate"]
807                         elif table[u"include-tests"] == u"PDR":
808                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
809                         elif table[u"include-tests"] == u"NDR":
810                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
811                         else:
812                             continue
813                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
814                             append(res)
815                     except (TypeError, KeyError):
816                         pass
817
818     tbl_lst = list()
819     footnote = False
820     for tst_name in tbl_dict:
821         item = [tbl_dict[tst_name][u"name"], ]
822         if history:
823             if tbl_dict[tst_name].get(u"history", None) is not None:
824                 for hist_data in tbl_dict[tst_name][u"history"].values():
825                     if hist_data:
826                         item.append(round(mean(hist_data) / 1000000, 2))
827                         item.append(round(stdev(hist_data) / 1000000, 2))
828                     else:
829                         item.extend([u"Not tested", u"Not tested"])
830             else:
831                 item.extend([u"Not tested", u"Not tested"])
832         data_t = tbl_dict[tst_name][u"ref-data"]
833         if data_t:
834             item.append(round(mean(data_t) / 1000000, 2))
835             item.append(round(stdev(data_t) / 1000000, 2))
836         else:
837             item.extend([u"Not tested", u"Not tested"])
838         data_t = tbl_dict[tst_name][u"cmp-data"]
839         if data_t:
840             item.append(round(mean(data_t) / 1000000, 2))
841             item.append(round(stdev(data_t) / 1000000, 2))
842         else:
843             item.extend([u"Not tested", u"Not tested"])
844         if item[-2] == u"Not tested":
845             pass
846         elif item[-4] == u"Not tested":
847             item.append(u"New in CSIT-1908")
848         elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
849             item.append(u"See footnote [1]")
850             footnote = True
851         elif item[-4] != 0:
852             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
853         if (len(item) == len(header)) and (item[-3] != u"Not tested"):
854             tbl_lst.append(item)
855
856     tbl_lst = _tpc_sort_table(tbl_lst)
857
858     # Generate csv tables:
859     csv_file = f"{table[u'output-file']}.csv"
860     with open(csv_file, u"w") as file_handler:
861         file_handler.write(header_str)
862         for test in tbl_lst:
863             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
864
865     txt_file_name = f"{table[u'output-file']}.txt"
866     convert_csv_to_pretty_txt(csv_file, txt_file_name)
867
868     if footnote:
869         with open(txt_file_name, u'a') as txt_file:
870             txt_file.writelines([
871                 u"\nFootnotes:\n",
872                 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
873                 u"2-node testbeds, dot1q encapsulation is now used on both "
874                 u"links of SUT.\n",
875                 u"    Previously dot1q was used only on a single link with the "
876                 u"other link carrying untagged Ethernet frames. This changes "
877                 u"results\n",
878                 u"    in slightly lower throughput in CSIT-1908 for these "
879                 u"tests. See release notes."
880             ])
881
882     # Generate html table:
883     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
884
885
def table_nics_comparison(table, input_data):
    """Generate the table(s) comparing results between NICs, as specified
    by the algorithm table_nics_comparison in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Build the header of the tables.
    try:
        hdr_param = \
            u"Rec Rate" if table[u"include-tests"] == u"MRR" else u"Thput"
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]"
        ]
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Collect reference and compare samples for every test.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if tst_name_mod not in tbl_dict:
                    tbl_dict[tst_name_mod] = {
                        u"name": u"-".join(tst_data[u"name"].split(u"-")[:-1]),
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        continue

                    # The NIC tag decides whether the sample belongs to the
                    # reference or to the compare column.
                    if result:
                        if table[u"reference"][u"nic"] in tst_data[u"tags"]:
                            tbl_dict[tst_name_mod][u"ref-data"].append(result)
                        elif table[u"compare"][u"nic"] in tst_data[u"tags"]:
                            tbl_dict[tst_name_mod][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    # Compute mean/stdev pairs and the relative change per test.
    tbl_lst = list()
    for tst in tbl_dict.values():
        row = [tst[u"name"], ]
        for samples in (tst[u"ref-data"], tst[u"cmp-data"]):
            if samples:
                row.append(round(mean(samples) / 1000000, 2))
                row.append(round(stdev(samples) / 1000000, 2))
            else:
                row.extend([None, None])
        if row[-4] is not None and row[-2] is not None and row[-4] != 0:
            row.append(int(relative_change(float(row[-4]), float(row[-2]))))
        # Keep only rows with a complete set of columns.
        if len(row) == len(header):
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"w") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join(str(item) for item in test) + u"\n")

    convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
                              f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
996
997
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) comparing SOAK and NDR results, as specified
    by the algorithm table_soak_vs_ndr in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Build the header of the table.
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]", u"Stdev of delta [%]"
        ]
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Collect the results of all available SOAK tests:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] != u"SOAK":
                    continue
                tst_name_mod = tst_name.replace(u"-soak", u"")
                if tst_name_mod not in tbl_dict:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    tbl_dict[tst_name_mod] = {
                        u"name": (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        ),
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    tbl_dict[tst_name_mod][u"cmp-data"].append(
                        tst_data[u"throughput"][u"LOWER"])
                except (KeyError, TypeError):
                    pass
    tests_lst = tbl_dict.keys()

    # Add the corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = \
                    tst_name.replace(u"-ndrpdr", u"").replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                except (KeyError, TypeError):
                    continue

    # Compute mean/stdev pairs and the relative change (with its stdev).
    tbl_lst = list()
    for tst in tbl_dict.values():
        row = [tst[u"name"], ]
        stats = list()
        for samples in (tst[u"ref-data"], tst[u"cmp-data"]):
            if samples:
                stats.append((mean(samples), stdev(samples)))
            else:
                stats.append((None, None))
            avg, dev = stats[-1]
            if avg is None:
                row.extend([None, None])
            else:
                row.append(round(avg / 1000000, 2))
                row.append(round(dev / 1000000, 2))
        (ref_mean, ref_stdev), (cmp_mean, cmp_stdev) = stats
        # Rows lacking either side (or with a zero reference mean) are
        # dropped, as no meaningful delta can be computed for them.
        if ref_mean and cmp_mean:
            delta, d_stdev = relative_change_stdev(
                ref_mean, cmp_mean, ref_stdev, cmp_stdev)
            row.append(round(delta, 2))
            row.append(round(d_stdev, 2))
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join(str(item) for item in test) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1129
1130
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Collect the receive rates per test, keyed by build; the insertion
    # order of the OrderedDict preserves the build sequence for the trend
    # analysis below.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to compute a trend.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        # Long-term reference: the best (maximal) trend average seen within
        # the long window but before the short window.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip tests without computable trend data. This single check
            # also covers the case of both relative changes being NaN, so
            # no separate test for that combination is needed.
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order the rows by the number of regressions (descending), then the
    # number of progressions (descending), then the short-term change
    # (ascending).
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1250
1251
1252 def _generate_url(testbed, test_name):
1253     """Generate URL to a trending plot from the name of the test case.
1254
1255     :param testbed: The testbed used for testing.
1256     :param test_name: The name of the test case.
1257     :type testbed: str
1258     :type test_name: str
1259     :returns: The URL to the plot with the trending data for the given test
1260         case.
1261     :rtype str
1262     """
1263
1264     if u"x520" in test_name:
1265         nic = u"x520"
1266     elif u"x710" in test_name:
1267         nic = u"x710"
1268     elif u"xl710" in test_name:
1269         nic = u"xl710"
1270     elif u"xxv710" in test_name:
1271         nic = u"xxv710"
1272     elif u"vic1227" in test_name:
1273         nic = u"vic1227"
1274     elif u"vic1385" in test_name:
1275         nic = u"vic1385"
1276     elif u"x553" in test_name:
1277         nic = u"x553"
1278     else:
1279         nic = u""
1280
1281     if u"64b" in test_name:
1282         frame_size = u"64b"
1283     elif u"78b" in test_name:
1284         frame_size = u"78b"
1285     elif u"imix" in test_name:
1286         frame_size = u"imix"
1287     elif u"9000b" in test_name:
1288         frame_size = u"9000b"
1289     elif u"1518b" in test_name:
1290         frame_size = u"1518b"
1291     elif u"114b" in test_name:
1292         frame_size = u"114b"
1293     else:
1294         frame_size = u""
1295
1296     if u"1t1c" in test_name or \
1297         (u"-1c-" in test_name and
1298          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1299         cores = u"1t1c"
1300     elif u"2t2c" in test_name or \
1301          (u"-2c-" in test_name and
1302           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1303         cores = u"2t2c"
1304     elif u"4t4c" in test_name or \
1305          (u"-4c-" in test_name and
1306           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1307         cores = u"4t4c"
1308     elif u"2t1c" in test_name or \
1309          (u"-1c-" in test_name and
1310           testbed in (u"2n-skx", u"3n-skx")):
1311         cores = u"2t1c"
1312     elif u"4t2c" in test_name:
1313         cores = u"4t2c"
1314     elif u"8t4c" in test_name:
1315         cores = u"8t4c"
1316     else:
1317         cores = u""
1318
1319     if u"testpmd" in test_name:
1320         driver = u"testpmd"
1321     elif u"l3fwd" in test_name:
1322         driver = u"l3fwd"
1323     elif u"avf" in test_name:
1324         driver = u"avf"
1325     elif u"dnv" in testbed or u"tsh" in testbed:
1326         driver = u"ixgbe"
1327     else:
1328         driver = u"i40e"
1329
1330     if u"acl" in test_name or \
1331             u"macip" in test_name or \
1332             u"nat" in test_name or \
1333             u"policer" in test_name or \
1334             u"cop" in test_name:
1335         bsf = u"features"
1336     elif u"scale" in test_name:
1337         bsf = u"scale"
1338     elif u"base" in test_name:
1339         bsf = u"base"
1340     else:
1341         bsf = u"base"
1342
1343     if u"114b" in test_name and u"vhost" in test_name:
1344         domain = u"vts"
1345     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1346         domain = u"dpdk"
1347     elif u"memif" in test_name:
1348         domain = u"container_memif"
1349     elif u"srv6" in test_name:
1350         domain = u"srv6"
1351     elif u"vhost" in test_name:
1352         domain = u"vhost"
1353         if u"vppl2xc" in test_name:
1354             driver += u"-vpp"
1355         else:
1356             driver += u"-testpmd"
1357         if u"lbvpplacp" in test_name:
1358             bsf += u"-link-bonding"
1359     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1360         domain = u"nf_service_density_vnfc"
1361     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1362         domain = u"nf_service_density_cnfc"
1363     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1364         domain = u"nf_service_density_cnfp"
1365     elif u"ipsec" in test_name:
1366         domain = u"ipsec"
1367         if u"sw" in test_name:
1368             bsf += u"-sw"
1369         elif u"hw" in test_name:
1370             bsf += u"-hw"
1371     elif u"ethip4vxlan" in test_name:
1372         domain = u"ip4_tunnels"
1373     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1374         domain = u"ip4"
1375     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1376         domain = u"ip6"
1377     elif u"l2xcbase" in test_name or \
1378             u"l2xcscale" in test_name or \
1379             u"l2bdbasemaclrn" in test_name or \
1380             u"l2bdscale" in test_name or \
1381             u"l2patch" in test_name:
1382         domain = u"l2"
1383     else:
1384         domain = u""
1385
1386     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1387     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1388
1389     return file_name + anchor_name
1390
1391
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Read the previously generated csv dashboard.
    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The dashboard is a plain HTML table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        thead = ET.SubElement(
            trow, u"th",
            attrib=dict(align=u"left" if idx == 0 else u"center")
        )
        thead.text = item

    # Data rows, zebra-striped per color class:
    colors = {
        u"regression": (u"#ffcccc", u"#ff9999"),
        u"progression": (u"#c6ecc6", u"#9fdf9f"),
        u"normal": (u"#e9f1fb", u"#d4e4f7")
    }
    for r_idx, row in enumerate(csv_lst[1:]):
        # Any regression wins over progression; otherwise the row is normal.
        if int(row[4]):
            color = u"regression"
        elif int(row[5]):
            color = u"progression"
        else:
            color = u"normal"
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
        )

        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow, u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            if c_idx:
                tdata.text = item
            else:
                # The first column is the test name, rendered as a link to
                # its trending graph.
                ref = ET.SubElement(
                    tdata, u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), item)}"
                    )
                )
                ref.text = item

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1492
1493
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    # For every build: its id, the tested version, pass/fail counts and the
    # names of the failed tests.
    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            passed = 0
            failed = 0
            failed_tests = list()
            # NOTE(review): `.values` (no call) presumably iterates a pandas
            # Series attribute here — confirm against InputData.filter_data.
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                if not groups:
                    continue
                failed_tests.append(f"{groups.group(0)}-{tst_data[u'name']}")
            tbl_list.extend((build, version, str(passed), str(failed)))
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"w") as file_handler:
        file_handler.writelines(test + u'\n' for test in tbl_list)
1554
1555
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    For each test that failed within the configured time window the table
    reports the number of failures and the details (time, VPP build,
    CSIT job build) of the most recent failure. The result is written as
    a CSV file and a pretty-printed text file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    now = dt.utcnow()
    # Only results younger than "window" days (default 7) are considered.
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        # No recognisable NIC in the parent suite name;
                        # skip the test entirely.
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    # Fetch the metadata once and reuse it for both the
                    # timestamp and the version.
                    metadata = input_data.metadata(job, build)
                    generated = metadata.get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            metadata.get(u"version", u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        # Count failures; the last iteration that matches keeps the details
        # of the most recent failing build.
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    # Sort by last-failure date (newest first), then stable-sort by the
    # number of failures (highest first). The stable secondary sort keeps
    # the date order within each failure count, replacing the original
    # O(max_fails * n) bucketing pass with an O(n log n) equivalent.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_lst.sort(key=lambda rel: rel[1], reverse=True)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"w") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1663
1664
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV produced by table_failed_tests and renders it as an HTML
    table embedded in an rST ``.. raw:: html`` directive. The first column
    of each data row becomes a link to the test's trending graph.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    # A testbed is mandatory: it is a component of every generated link.
    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    tbl_root = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header: first cell left-aligned, the rest centered.
    header_row = ET.SubElement(
        tbl_root, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for col_nr, caption in enumerate(rows[0]):
        cell = ET.SubElement(
            header_row,
            u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        cell.text = caption

    # Rows: alternate the two background colours.
    colors = (u"#e9f1fb", u"#d4e4f7")
    for row_nr, row in enumerate(rows[1:]):
        row_elem = ET.SubElement(
            tbl_root, u"tr", attrib=dict(bgcolor=colors[row_nr % 2])
        )

        # Columns:
        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                row_elem,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0:
                # Name: wrap the test name in a link to its trending graph.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                link.text = value
            else:
                cell.text = value

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(tbl_root, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return