PAL: Add HDRHistogram graphs for latency
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34     convert_csv_to_pretty_txt, relative_change_stdev
35
36
37 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
38
39
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table specification names an algorithm; the matching generator
    function is looked up and invoked. Unknown algorithms are logged and
    skipped so the remaining tables are still generated.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Map of algorithm names (as used in the specification) to the
    # functions implementing them.
    generator = {
        u"table_details": table_details,
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        # Look the algorithm up first: a plain dict subscript raises
        # KeyError for an unknown algorithm, which the previous
        # `except NameError` handler never caught, aborting the loop.
        generate = generator.get(table[u"algorithm"], None)
        if generate is None:
            logging.error(
                f"Algorithm {table[u'algorithm']} is not defined."
            )
            continue
        try:
            generate(table, data)
        except NameError as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
73
74
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    Writes one CSV file per suite found in the first job/build listed in
    the table specification.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    # Column titles are CSV-escaped: wrapped in double quotes with any
    # embedded double quote doubled.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    # Generate the data for the table according to the model in the table
    # specification
    # Only the first job and its first build from the specification are used.
    job = list(table[u"data"].keys())[0]
    build = str(table[u"data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error(
            u"    No data available. The table will not be generated."
        )
        return

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data[job][build].keys():
            # Keep only tests that belong to the current suite.
            if data[job][build][test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column[u"data"] is a space-separated spec; its second
                    # word is the key into the per-test data.
                    col_data = str(data[job][build][test][column[
                        u"data"].split(" ")[1]]).replace(u'"', u'""')
                    if column[u"data"].split(u" ")[1] in \
                        (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", )
                        # NOTE(review): [:-5] presumably strips a trailing
                        # line-break marker — confirm against the data
                        # format (the merged variant does the same).
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u"No data")
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = (
                f"{table[u'output-file']}_{suite_name}"
                f"{table[u'output-file-ext']}"
            )
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"w") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
147
148
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Like table_details, but the data from all jobs/builds is first merged
    into one flat structure keyed by test name; one CSV file is written
    per suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    # Column titles are CSV-escaped: wrapped in double quotes with any
    # embedded double quote doubled.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            # Keep only tests that belong to the current suite.
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column[u"data"] is a space-separated spec; its second
                    # word is the key into the merged test data.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in \
                        (u"conf-history", u"show-run"):
                        # Only the first u" |br| " marker is removed here.
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        # NOTE(review): [:-5] presumably strips a trailing
                        # line-break marker — confirm the data format.
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = (
                f"{table[u'output-file']}_{suite_name}"
                f"{table[u'output-file-ext']}"
            )
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"w") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
221
222
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Drops the test-type suffix, normalises the thread/core tag to the
    core count only, and strips the NIC code.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Sequential substitutions, in the same order as the original chain.
    substitutions = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c"),
    )
    modified = test_name
    for old, new in substitutions:
        modified = modified.replace(old, new)

    # Finally remove the NIC code from the name.
    return re.sub(REGEX_NIC, u"", modified)
246
247
248 def _tpc_modify_displayed_test_name(test_name):
249     """Modify a test name which is displayed in a table by replacing its parts.
250
251     :param test_name: Test name to be modified.
252     :type test_name: str
253     :returns: Modified test name.
254     :rtype: str
255     """
256     return test_name.\
257         replace(u"1t1c", u"1c").\
258         replace(u"2t1c", u"1c"). \
259         replace(u"2t2c", u"2c").\
260         replace(u"4t2c", u"2c"). \
261         replace(u"4t4c", u"4c").\
262         replace(u"8t4c", u"4c")
263
264
265 def _tpc_insert_data(target, src, include_tests):
266     """Insert src data to the target structure.
267
268     :param target: Target structure where the data is placed.
269     :param src: Source data to be placed into the target stucture.
270     :param include_tests: Which results will be included (MRR, NDR, PDR).
271     :type target: list
272     :type src: dict
273     :type include_tests: str
274     """
275     try:
276         if include_tests == u"MRR":
277             target.append(src[u"result"][u"receive-rate"])
278         elif include_tests == u"PDR":
279             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
280         elif include_tests == u"NDR":
281             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
282     except (KeyError, TypeError):
283         pass
284
285
286 def _tpc_sort_table(table):
287     """Sort the table this way:
288
289     1. Put "New in CSIT-XXXX" at the first place.
290     2. Put "See footnote" at the second place.
291     3. Sort the rest by "Delta".
292
293     :param table: Table to sort.
294     :type table: list
295     :returns: Sorted table.
296     :rtype: list
297     """
298
299
300     tbl_new = list()
301     tbl_see = list()
302     tbl_delta = list()
303     for item in table:
304         if isinstance(item[-1], str):
305             if u"New in CSIT" in item[-1]:
306                 tbl_new.append(item)
307             elif u"See footnote" in item[-1]:
308                 tbl_see.append(item)
309         else:
310             tbl_delta.append(item)
311
312     # Sort the tables:
313     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
314     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
315     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
316     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
317
318     # Put the tables together:
319     table = list()
320     table.extend(tbl_new)
321     table.extend(tbl_see)
322     table.extend(tbl_delta)
323
324     return table
325
326
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    frame = pd.DataFrame(data, columns=header)

    # One pre-sorted frame per (column, direction) combination. The first
    # column is always the tie-breaker; when sorting BY the first column
    # itself, its primary direction is flipped instead.
    ascending_frames = [
        frame.sort_values(
            by=[col, header[0]],
            ascending=[True, True] if col != header[0] else [False, True]
        )
        for col in header
    ]
    descending_frames = [
        frame.sort_values(
            by=[col, header[0]],
            ascending=[False, True] if col != header[0] else [True, True]
        )
        for col in header
    ]
    all_frames = ascending_frames + descending_frames

    # Alternating row colours.
    fill_color = [[
        u"#d4e4f7" if row_idx % 2 else u"#e9f1fb"
        for row_idx in range(len(frame))
    ]]
    table_header = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]
    )

    fig = go.Figure()

    # Add one table trace per sorted variant; the drop-down below toggles
    # their visibility.
    for variant in all_frames:
        fig.add_trace(
            go.Table(
                columnwidth=[30, 10],
                header=table_header,
                cells=dict(
                    values=[variant.get(col) for col in header],
                    fill_color=fill_color,
                    align=[u"left", u"right"]
                )
            )
        )

    # Drop-down menu: each entry makes exactly one trace visible.
    labels = [f"<b>{itm}</b> (ascending)" for itm in header]
    labels.extend(f"<b>{itm}</b> (descending)" for itm in header)
    buttons = list()
    for idx, label in enumerate(labels):
        visibility = [False, ] * len(labels)
        visibility[idx] = True
        buttons.append(
            dict(
                label=label.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visibility}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.03,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                active=len(labels) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
418
419
def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    Produces CSV, pretty TXT and sortable HTML tables comparing the
    "reference" results against the "compare" results (optionally with
    additional historical releases).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        # Optional historical releases; each adds a mean and a stdev
        # column before the reference/compare columns.
        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    topo = ""
    # Pass 1: collect the reference results, keyed by the normalised
    # test name (test-type suffix, core tag and NIC code removed).
    for job, builds in table[u"reference"][u"data"].items():
        topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name: NIC code from the parent suite plus
                    # the test name without its trailing part.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 src=tst_data,
                                 include_tests=table[u"include-tests"])

    # Pass 2: collect the results to compare against the reference.
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional pass 3: replace the compare data with replacement builds.
    # The first replacement row found clears the previously collected
    # cmp-data (create_new_list flag).
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Pass 4: collect the historical results; only tests already present
    # in tbl_dict are considered.
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        pass

    # Build the table rows: name, [history mean/stdev ...],
    # ref mean/stdev, cmp mean/stdev, delta. Rates are converted to Mpps.
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        # item[-2] is the compare mean, item[-4] the reference mean.
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-1908")
        elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
            # dot1q methodology change on 2-node testbeds, see footnote.
            item.append(u"See footnote [1]")
            footnote = True
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Rows without a delta column or without compare results are
        # dropped.
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
649
650
651 def table_perf_comparison_nic(table, input_data):
652     """Generate the table(s) with algorithm: table_perf_comparison
653     specified in the specification file.
654
655     :param table: Table to generate.
656     :param input_data: Data to process.
657     :type table: pandas.Series
658     :type input_data: InputData
659     """
660
661     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
662
663     # Transform the data
664     logging.info(
665         f"    Creating the data set for the {table.get(u'type', u'')} "
666         f"{table.get(u'title', u'')}."
667     )
668     data = input_data.filter_data(table, continue_on_error=True)
669
670     # Prepare the header of the tables
671     try:
672         header = [u"Test case", ]
673
674         if table[u"include-tests"] == u"MRR":
675             hdr_param = u"Rec Rate"
676         else:
677             hdr_param = u"Thput"
678
679         history = table.get(u"history", list())
680         for item in history:
681             header.extend(
682                 [
683                     f"{item[u'title']} {hdr_param} [Mpps]",
684                     f"{item[u'title']} Stdev [Mpps]"
685                 ]
686             )
687         header.extend(
688             [
689                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
690                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
691                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
692                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
693                 u"Delta [%]"
694             ]
695         )
696         header_str = u",".join(header) + u"\n"
697     except (AttributeError, KeyError) as err:
698         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
699         return
700
701     # Prepare data to the table:
702     tbl_dict = dict()
703     topo = u""
704     for job, builds in table[u"reference"][u"data"].items():
705         topo = u"2n-skx" if u"2n-skx" in job else u""
706         for build in builds:
707             for tst_name, tst_data in data[job][str(build)].items():
708                 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
709                     continue
710                 tst_name_mod = _tpc_modify_test_name(tst_name)
711                 if u"across topologies" in table[u"title"].lower():
712                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
713                 if tbl_dict.get(tst_name_mod, None) is None:
714                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
715                     if u"across testbeds" in table[u"title"].lower() or \
716                             u"across topologies" in table[u"title"].lower():
717                         name = _tpc_modify_displayed_test_name(name)
718                     tbl_dict[tst_name_mod] = {
719                         u"name": name,
720                         u"ref-data": list(),
721                         u"cmp-data": list()
722                     }
723                 _tpc_insert_data(
724                     target=tbl_dict[tst_name_mod][u"ref-data"],
725                     src=tst_data,
726                     include_tests=table[u"include-tests"]
727                 )
728
729     for job, builds in table[u"compare"][u"data"].items():
730         for build in builds:
731             for tst_name, tst_data in data[job][str(build)].items():
732                 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
733                     continue
734                 tst_name_mod = _tpc_modify_test_name(tst_name)
735                 if u"across topologies" in table[u"title"].lower():
736                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
737                 if tbl_dict.get(tst_name_mod, None) is None:
738                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
739                     if u"across testbeds" in table[u"title"].lower() or \
740                             u"across topologies" in table[u"title"].lower():
741                         name = _tpc_modify_displayed_test_name(name)
742                     tbl_dict[tst_name_mod] = {
743                         u"name": name,
744                         u"ref-data": list(),
745                         u"cmp-data": list()
746                     }
747                 _tpc_insert_data(
748                     target=tbl_dict[tst_name_mod][u"cmp-data"],
749                     src=tst_data,
750                     include_tests=table[u"include-tests"]
751                 )
752
753     replacement = table[u"compare"].get(u"data-replacement", None)
754     if replacement:
755         create_new_list = True
756         rpl_data = input_data.filter_data(
757             table, data=replacement, continue_on_error=True)
758         for job, builds in replacement.items():
759             for build in builds:
760                 for tst_name, tst_data in rpl_data[job][str(build)].items():
761                     if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
762                         continue
763                     tst_name_mod = _tpc_modify_test_name(tst_name)
764                     if u"across topologies" in table[u"title"].lower():
765                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
766                     if tbl_dict.get(tst_name_mod, None) is None:
767                         name = \
768                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
769                         if u"across testbeds" in table[u"title"].lower() or \
770                                 u"across topologies" in table[u"title"].lower():
771                             name = _tpc_modify_displayed_test_name(name)
772                         tbl_dict[tst_name_mod] = {
773                             u"name": name,
774                             u"ref-data": list(),
775                             u"cmp-data": list()
776                         }
777                     if create_new_list:
778                         create_new_list = False
779                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
780
781                     _tpc_insert_data(
782                         target=tbl_dict[tst_name_mod][u"cmp-data"],
783                         src=tst_data,
784                         include_tests=table[u"include-tests"]
785                     )
786
787     for item in history:
788         for job, builds in item[u"data"].items():
789             for build in builds:
790                 for tst_name, tst_data in data[job][str(build)].items():
791                     if item[u"nic"] not in tst_data[u"tags"]:
792                         continue
793                     tst_name_mod = _tpc_modify_test_name(tst_name)
794                     if u"across topologies" in table[u"title"].lower():
795                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
796                     if tbl_dict.get(tst_name_mod, None) is None:
797                         continue
798                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
799                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
800                     if tbl_dict[tst_name_mod][u"history"].\
801                             get(item[u"title"], None) is None:
802                         tbl_dict[tst_name_mod][u"history"][item[
803                             u"title"]] = list()
804                     try:
805                         if table[u"include-tests"] == u"MRR":
806                             res = tst_data[u"result"][u"receive-rate"]
807                         elif table[u"include-tests"] == u"PDR":
808                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
809                         elif table[u"include-tests"] == u"NDR":
810                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
811                         else:
812                             continue
813                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
814                             append(res)
815                     except (TypeError, KeyError):
816                         pass
817
818     tbl_lst = list()
819     footnote = False
820     for tst_name in tbl_dict:
821         item = [tbl_dict[tst_name][u"name"], ]
822         if history:
823             if tbl_dict[tst_name].get(u"history", None) is not None:
824                 for hist_data in tbl_dict[tst_name][u"history"].values():
825                     if hist_data:
826                         item.append(round(mean(hist_data) / 1000000, 2))
827                         item.append(round(stdev(hist_data) / 1000000, 2))
828                     else:
829                         item.extend([u"Not tested", u"Not tested"])
830             else:
831                 item.extend([u"Not tested", u"Not tested"])
832         data_t = tbl_dict[tst_name][u"ref-data"]
833         if data_t:
834             item.append(round(mean(data_t) / 1000000, 2))
835             item.append(round(stdev(data_t) / 1000000, 2))
836         else:
837             item.extend([u"Not tested", u"Not tested"])
838         data_t = tbl_dict[tst_name][u"cmp-data"]
839         if data_t:
840             item.append(round(mean(data_t) / 1000000, 2))
841             item.append(round(stdev(data_t) / 1000000, 2))
842         else:
843             item.extend([u"Not tested", u"Not tested"])
844         if item[-2] == u"Not tested":
845             pass
846         elif item[-4] == u"Not tested":
847             item.append(u"New in CSIT-1908")
848         elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
849             item.append(u"See footnote [1]")
850             footnote = True
851         elif item[-4] != 0:
852             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
853         if (len(item) == len(header)) and (item[-3] != u"Not tested"):
854             tbl_lst.append(item)
855
856     tbl_lst = _tpc_sort_table(tbl_lst)
857
858     # Generate csv tables:
859     csv_file = f"{table[u'output-file']}.csv"
860     with open(csv_file, u"w") as file_handler:
861         file_handler.write(header_str)
862         for test in tbl_lst:
863             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
864
865     txt_file_name = f"{table[u'output-file']}.txt"
866     convert_csv_to_pretty_txt(csv_file, txt_file_name)
867
868     if footnote:
869         with open(txt_file_name, u'a') as txt_file:
870             txt_file.writelines([
871                 u"\nFootnotes:\n",
872                 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
873                 u"2-node testbeds, dot1q encapsulation is now used on both "
874                 u"links of SUT.\n",
875                 u"    Previously dot1q was used only on a single link with the "
876                 u"other link carrying untagged Ethernet frames. This changes "
877                 u"results\n",
878                 u"    in slightly lower throughput in CSIT-1908 for these "
879                 u"tests. See release notes."
880             ])
881
882     # Generate html table:
883     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
884
885
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Collects per-test throughput (or receive rate) for two NICs, computes
    mean, stdev and relative change, and writes the result as CSV, pretty
    TXT and HTML files.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        hdr_param = \
            u"Rec Rate" if table[u"include-tests"] == u"MRR" else u"Thput"
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]"
        ]
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Collect the measured results per test, split into the reference-NIC
    # and compare-NIC buckets:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                entry = tbl_dict.setdefault(
                    tst_name_mod,
                    {
                        # Drop the trailing (NIC-specific) part of the name.
                        u"name": u"-".join(tst_data[u"name"].split(u"-")[:-1]),
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                )
                try:
                    test_type = table[u"include-tests"]
                    if test_type == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif test_type in (u"PDR", u"NDR"):
                        result = tst_data[u"throughput"][test_type][u"LOWER"]
                    else:
                        continue
                    if not result:
                        continue
                    if table[u"reference"][u"nic"] in tst_data[u"tags"]:
                        entry[u"ref-data"].append(result)
                    elif table[u"compare"][u"nic"] in tst_data[u"tags"]:
                        entry[u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    # Build table rows; only rows with both NICs measured (and therefore a
    # computed delta) reach the full header length and are kept.
    tbl_lst = list()
    for entry in tbl_dict.values():
        row = [entry[u"name"], ]
        for samples in (entry[u"ref-data"], entry[u"cmp-data"]):
            if samples:
                row.append(round(mean(samples) / 1000000, 2))
                row.append(round(stdev(samples) / 1000000, 2))
            else:
                row.extend([None, None])
        if row[-4] is not None and row[-2] is not None and row[-4] != 0:
            row.append(int(relative_change(float(row[-4]), float(row[-2]))))
        if len(row) == len(header):
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda row: row[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"w") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for row in tbl_lst:
            file_handler.write(u",".join([str(item) for item in row]) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
996
997
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Pairs each SOAK test result (compare column) with the NDR/PDR/MRR result
    of the corresponding non-soak test (reference column) and reports the
    relative change together with its standard deviation.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]", u"Stdev of delta [%]"
        ]
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    # Tests are keyed by the name with the "-soak" part stripped, so they can
    # later be matched against their "-ndrpdr"/"-mrr" counterparts.
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        # Prefix the displayed name with the NIC model taken
                        # from the parent suite name (empty if not found).
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        pass
    # Dict keys view; membership tests below only consider soak tests.
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    # Pick the reference value according to the configured
                    # test/result type.
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    # Build the rows; only tests with data in both columns produce a delta
    # and are added to the table.
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1129
1130
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    Builds the trending dashboard: per test, the latest trend value, the
    short- and long-term relative changes, and the counts of regressions and
    progressions within the short-term window.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Collect receive-rate results per test, keyed by build number:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        # The NIC cannot be determined from the parent suite
                        # name; skip the test.
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        if len(data_t) < 2:
            # At least two samples are needed to evaluate a trend.
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        # Maximum of the long-term trend averages outside the short-term
        # window; nan when there is no valid (non-nan) sample in that range.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip tests with incomplete trend data. This single condition
            # also covers the case of both relative changes being nan, so no
            # separate check for that is needed.
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Multi-key ordering: regressions descending, then progressions
    # descending, then short-term change ascending.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1250
1251
1252 def _generate_url(base, testbed, test_name):
1253     """Generate URL to a trending plot from the name of the test case.
1254
1255     :param base: The base part of URL common to all test cases.
1256     :param testbed: The testbed used for testing.
1257     :param test_name: The name of the test case.
1258     :type base: str
1259     :type testbed: str
1260     :type test_name: str
1261     :returns: The URL to the plot with the trending data for the given test
1262         case.
1263     :rtype str
1264     """
1265
1266     url = base
1267     file_name = u""
1268     anchor = u".html#"
1269     feature = u""
1270
1271     if u"lbdpdk" in test_name or u"lbvpp" in test_name:
1272         file_name = u"link_bonding"
1273
1274     elif u"114b" in test_name and u"vhost" in test_name:
1275         file_name = u"vts"
1276
1277     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1278         file_name = u"dpdk"
1279
1280     elif u"memif" in test_name:
1281         file_name = u"container_memif"
1282         feature = u"-base"
1283
1284     elif u"srv6" in test_name:
1285         file_name = u"srv6"
1286
1287     elif u"vhost" in test_name:
1288         if u"l2xcbase" in test_name or u"l2bdbasemaclrn" in test_name:
1289             file_name = u"vm_vhost_l2"
1290             if u"114b" in test_name:
1291                 feature = u""
1292             elif u"l2xcbase" in test_name and u"x520" in test_name:
1293                 feature = u"-base-l2xc"
1294             elif u"l2bdbasemaclrn" in test_name and u"x520" in test_name:
1295                 feature = u"-base-l2bd"
1296             else:
1297                 feature = u"-base"
1298         elif u"ip4base" in test_name:
1299             file_name = u"vm_vhost_ip4"
1300             feature = u"-base"
1301
1302     elif u"ipsecbasetnlsw" in test_name:
1303         file_name = u"ipsecsw"
1304         feature = u"-base-scale"
1305
1306     elif u"ipsec" in test_name:
1307         file_name = u"ipsec"
1308         feature = u"-base-scale"
1309         if u"hw-" in test_name:
1310             file_name = u"ipsechw"
1311         elif u"sw-" in test_name:
1312             file_name = u"ipsecsw"
1313         if u"-int-" in test_name:
1314             feature = u"-base-scale-int"
1315         elif u"tnl" in test_name:
1316             feature = u"-base-scale-tnl"
1317
1318     elif u"ethip4lispip" in test_name or u"ethip4vxlan" in test_name:
1319         file_name = u"ip4_tunnels"
1320         feature = u"-base"
1321
1322     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1323         file_name = u"ip4"
1324         if u"xl710" in test_name:
1325             feature = u"-base-scale-features"
1326         elif u"iacl" in test_name:
1327             feature = u"-features-iacl"
1328         elif u"oacl" in test_name:
1329             feature = u"-features-oacl"
1330         elif u"snat" in test_name or u"cop" in test_name:
1331             feature = u"-features"
1332         else:
1333             feature = u"-base-scale"
1334
1335     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1336         file_name = u"ip6"
1337         feature = u"-base-scale"
1338
1339     elif u"l2xcbase" in test_name or u"l2xcscale" in test_name \
1340             or u"l2bdbasemaclrn" in test_name or u"l2bdscale" in test_name:
1341         file_name = u"l2"
1342         if u"macip" in test_name:
1343             feature = u"-features-macip"
1344         elif u"iacl" in test_name:
1345             feature = u"-features-iacl"
1346         elif u"oacl" in test_name:
1347             feature = u"-features-oacl"
1348         else:
1349             feature = u"-base-scale"
1350
1351     if u"x520" in test_name:
1352         nic = u"x520-"
1353     elif u"x710" in test_name:
1354         nic = u"x710-"
1355     elif u"xl710" in test_name:
1356         nic = u"xl710-"
1357     elif u"xxv710" in test_name:
1358         nic = u"xxv710-"
1359     elif u"vic1227" in test_name:
1360         nic = u"vic1227-"
1361     elif u"vic1385" in test_name:
1362         nic = u"vic1385-"
1363     elif u"x553" in test_name:
1364         nic = u"x553-"
1365     else:
1366         nic = u""
1367     anchor += nic
1368
1369     if u"64b" in test_name:
1370         framesize = u"64b"
1371     elif u"78b" in test_name:
1372         framesize = u"78b"
1373     elif u"imix" in test_name:
1374         framesize = u"imix"
1375     elif u"9000b" in test_name:
1376         framesize = u"9000b"
1377     elif u"1518b" in test_name:
1378         framesize = u"1518b"
1379     elif u"114b" in test_name:
1380         framesize = u"114b"
1381     else:
1382         framesize = u""
1383     anchor += framesize + u"-"
1384
1385     if u"1t1c" in test_name:
1386         anchor += u"1t1c"
1387     elif u"2t2c" in test_name:
1388         anchor += u"2t2c"
1389     elif u"4t4c" in test_name:
1390         anchor += u"4t4c"
1391     elif u"2t1c" in test_name:
1392         anchor += u"2t1c"
1393     elif u"4t2c" in test_name:
1394         anchor += u"4t2c"
1395     elif u"8t4c" in test_name:
1396         anchor += u"8t4c"
1397
1398     return url + file_name + u"-" + testbed + u"-" + nic + framesize + \
1399         feature.replace("-int", u"").replace("-tnl", u"") + anchor + feature
1400
1401
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the CSV dashboard written by table_perf_trending_dash and renders
    it as an HTML table with rows colored according to the regression /
    progression counts, linking each test name to its trending plot.

    :param table: Table to generate.
    :param input_data: Data to process (unused by this algorithm).
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # An empty input file would crash below when reading the header row
    # (csv_lst[0]); bail out gracefully instead.
    if not csv_lst:
        logging.warning(
            f"The input file {table[u'input-file']} is empty, nothing to do."
        )
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: each classification maps to a (light, dark) pair of shades used
    # for alternating rows.
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 hold the regression / progression counts.
        if int(row[4]):
            color = u"regression"
        elif int(row[5]):
            color = u"progression"
        else:
            color = u"normal"
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name: the first column links the test name to its trending
            # plot.
            if c_idx == 0:
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=_generate_url(
                            u"../trending/",
                            table.get(u"testbed", None),
                            item
                        )
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1505
1506
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    The output is a flat text file: for every build it writes the build
    number, the VPP version, the number of passed tests, the number of
    failed tests and then one line per failed test name.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                # Metadata for this build is missing; abort the whole table
                # rather than emit an incomplete one.
                logging.error(f"Data for {job}: {build} is not present.")
                return
            tbl_list.append(build)
            tbl_list.append(version)
            failed_tests = list()
            passed = 0
            failed = 0
            # NOTE(review): ``.values`` is accessed as a property here, which
            # assumes data[job][build] is a pandas Series; a plain dict would
            # need ``.values()``. Confirm against InputData.
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                # Only failed tests whose NIC can be extracted from the
                # parent suite name are listed.
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                if not groups:
                    continue
                nic = groups.group(0)
                failed_tests.append(f"{nic}-{tst_data[u'name']}")
            tbl_list.append(str(passed))
            tbl_list.append(str(failed))
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"w") as file_handler:
        for test in tbl_list:
            file_handler.write(test + u'\n')
1567
1568
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Collect, per test, all results generated within the configured time
    window (``window`` in the specification, default 7 days), count the
    failures and write a CSV (and pretty txt) table sorted by the number
    of failures and the date of the last failure, both descending.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        # Tests without a recognizable NIC are skipped.
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    # Fetch the build metadata once and reuse it; it
                    # provides both the generation timestamp and the
                    # tested version.
                    metadata = input_data.metadata(job, build)
                    generated = metadata.get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    # Keep only results generated within the time window.
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            metadata.get(u"version", u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    tbl_lst = list()
    for tst_data in tbl_dict.values():
        # Count the failures and remember the data of the last one.
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    # Sort by nr of failures (descending), then by the date of the last
    # failure (descending). The date string format "%Y%m%d %H:%M" sorts
    # lexicographically in chronological order, so no parsing is needed.
    tbl_sorted = sorted(
        tbl_lst, key=lambda rel: (rel[1], rel[2]), reverse=True
    )

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"w") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1676
1677
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Read the CSV produced earlier (``input-file``), render it as an HTML
    table with the first column linked to the trending graphs and write
    it as a ``.. raw:: html`` block to ``output-file``.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The root <table> element:
    tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row, built from the first CSV row:
    hdr_row = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col, text in enumerate(rows[0]):
        cell = ET.SubElement(
            hdr_row,
            u"th",
            attrib=dict(align=u"left" if col == 0 else u"center")
        )
        cell.text = text

    # Data rows, with alternating background colors:
    colors = (u"#e9f1fb", u"#d4e4f7")
    for r_idx, row in enumerate(rows[1:]):
        t_row = ET.SubElement(
            tbl, u"tr", attrib=dict(bgcolor=colors[r_idx % 2])
        )
        for c_idx, item in enumerate(row):
            t_data = ET.SubElement(
                t_row,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            if c_idx == 0:
                # First column: test name linked to its trending graph.
                anchor = ET.SubElement(
                    t_data,
                    u"a",
                    attrib=dict(
                        href=_generate_url(
                            u"../trending/",
                            table.get(u"testbed", None),
                            item
                        )
                    )
                )
                anchor.text = item
            else:
                t_data.text = item

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(tbl, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return