Trending: Add mellanox
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34     convert_csv_to_pretty_txt, relative_change_stdev
35
36
# Matches the NIC part of a test/suite name, e.g. u"10ge2p1x520":
# <speed>ge<ports>p<num><model...> — used both to strip the NIC from test
# names and to extract it from suite (parent) names.
REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
38
39
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name from the specification --> generator
    # function implementing it.
    generator = {
        u"table_details": table_details,
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            generator[table[u"algorithm"]](table, data)
        except (KeyError, NameError) as err:
            # An unknown algorithm name in the specification raises KeyError
            # on the dict lookup; catch it (NameError is kept for backward
            # compatibility), log and continue with the remaining tables
            # instead of aborting the whole generation.
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
73
74
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table)

    # CSV header: each column title quoted, inner quotes doubled.
    header = [
        u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        for column in table[u"columns"]
    ]

    # Only the first job and its first build serve as the data source.
    job = list(table[u"data"].keys())[0]
    build = str(table[u"data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error(
            u"    No data available. The table will not be generated."
        )
        return

    # One output file is written per suite.
    for suite in suites.values:
        suite_name = suite[u"name"]
        rows = list()
        tests = data[job][build]
        for test in tests.keys():
            # Keep only the tests belonging to the current suite.
            if tests[test][u"parent"] not in suite_name:
                continue
            row = list()
            for column in table[u"columns"]:
                try:
                    # The column spec is e.g. u"data name"; the second word
                    # is the key into the test data.
                    data_key = column[u"data"].split(u" ")[1]
                    cell = str(tests[test][data_key]).replace(u'"', u'""')
                    if data_key in (u"conf-history", u"show-run"):
                        # Wrap multi-line output in rST pre-formatted
                        # markers; the last 5 characters (tail of a break
                        # marker — TODO confirm) are dropped.
                        cell = cell.replace(u" |br| ", u"", 1)
                        cell = f" |prein| {cell[:-5]} |preout| "
                    row.append(f'"{cell}"')
                except KeyError:
                    row.append(u"No data")
            rows.append(row)

        # Write the data to file
        if rows:
            file_name = (
                f"{table[u'output-file']}_{suite_name}"
                f"{table[u'output-file-ext']}"
            )
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for row in rows:
                    file_handler.write(u",".join(row) + u"\n")

    logging.info(u"  Done.")
147
148
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Unlike table_details, the data from all specified jobs/builds is merged
    into one flat structure before the per-suite files are written.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    # Suites are merged the same way; one output file is written per suite.
    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables: titles quoted, inner quotes doubled.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            # Keep only the tests belonging to the current suite.
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # The column spec is e.g. u"data name"; the second word
                    # is the key into the test data.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Trailing spaces keep the column width aligned in the
                    # pretty-printed txt output.
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in \
                        (u"conf-history", u"show-run"):
                        # Wrap multi-line output in rST pre-formatted
                        # markers; the last 5 characters (tail of a break
                        # marker — TODO confirm) are dropped.
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = (
                f"{table[u'output-file']}_{suite_name}"
                f"{table[u'output-file-ext']}"
            )
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
221
222
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the test-type suffix (e.g. u"-ndrpdr"), collapses the
    thread/core tag (e.g. u"2t1c" -> u"1c") and removes the NIC part.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Order matters: longer suffixes must be replaced before their
    # sub-strings (u"-ndrpdrdisc" before u"-ndrpdr" before u"-ndr").
    replacements = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c")
    )
    modified = test_name
    for old, new in replacements:
        modified = modified.replace(old, new)

    # Finally drop the NIC part of the name.
    return re.sub(REGEX_NIC, u"", modified)
246
247
248 def _tpc_modify_displayed_test_name(test_name):
249     """Modify a test name which is displayed in a table by replacing its parts.
250
251     :param test_name: Test name to be modified.
252     :type test_name: str
253     :returns: Modified test name.
254     :rtype: str
255     """
256     return test_name.\
257         replace(u"1t1c", u"1c").\
258         replace(u"2t1c", u"1c"). \
259         replace(u"2t2c", u"2c").\
260         replace(u"4t2c", u"2c"). \
261         replace(u"4t4c", u"4c").\
262         replace(u"8t4c", u"4c")
263
264
265 def _tpc_insert_data(target, src, include_tests):
266     """Insert src data to the target structure.
267
268     :param target: Target structure where the data is placed.
269     :param src: Source data to be placed into the target stucture.
270     :param include_tests: Which results will be included (MRR, NDR, PDR).
271     :type target: list
272     :type src: dict
273     :type include_tests: str
274     """
275     try:
276         if include_tests == u"MRR":
277             target.append(src[u"result"][u"receive-rate"])
278         elif include_tests == u"PDR":
279             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
280         elif include_tests == u"NDR":
281             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
282     except (KeyError, TypeError):
283         pass
284
285
286 def _tpc_sort_table(table):
287     """Sort the table this way:
288
289     1. Put "New in CSIT-XXXX" at the first place.
290     2. Put "See footnote" at the second place.
291     3. Sort the rest by "Delta".
292
293     :param table: Table to sort.
294     :type table: list
295     :returns: Sorted table.
296     :rtype: list
297     """
298
299
300     tbl_new = list()
301     tbl_see = list()
302     tbl_delta = list()
303     for item in table:
304         if isinstance(item[-1], str):
305             if u"New in CSIT" in item[-1]:
306                 tbl_new.append(item)
307             elif u"See footnote" in item[-1]:
308                 tbl_see.append(item)
309         else:
310             tbl_delta.append(item)
311
312     # Sort the tables:
313     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
314     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
315     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
316     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
317
318     # Put the tables together:
319     table = list()
320     table.extend(tbl_new)
321     table.extend(tbl_see)
322     table.extend(tbl_delta)
323
324     return table
325
326
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    df_data = pd.DataFrame(data, columns=header)

    # One sorted view per column, ascending then descending; header[0]
    # (the test name) is always the secondary sort key.
    # NOTE(review): for key == header[0] the flags are [False, True], i.e.
    # the "ascending" view of the first column actually sorts it
    # descending (and vice versa) — confirm this inversion is intended.
    df_sorted = [df_data.sort_values(
        by=[key, header[0]], ascending=[True, True]
        if key != header[0] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[0]], ascending=[False, True]
        if key != header[0] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Alternating row background colours.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]
    )

    fig = go.Figure()

    # One Table trace per sorted view; the dropdown menu below switches
    # which single trace is visible.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
        fig.add_trace(
            go.Table(
                columnwidth=[30, 10],
                header=table_header,
                cells=dict(
                    values=columns,
                    fill_color=fill_color,
                    align=[u"left", u"right"]
                )
            )
        )

    # Build the dropdown: menu item i shows only trace i.
    buttons = list()
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
        visible[idx] = True
        buttons.append(
            dict(
                label=hdr.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visible}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.03,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                # The last menu item (last column, descending) is the one
                # selected initially.
                active=len(menu_items) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
418
419
def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    For every test it collects "reference" and "compare" samples (plus
    optional historical releases), computes mean/stdev in Mpps and the
    relative delta, and writes csv, txt and html outputs.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        # Two columns (mean, stdev) per historical release, then two for
        # reference, two for compare, and the delta.
        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    # tbl_dict maps the normalized test name to its display name and the
    # collected u"ref-data" / u"cmp-data" sample lists.
    tbl_dict = dict()
    # topo = ""
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name: NIC (from the parent suite) plus the
                    # test name without its last (frame-size/type) part.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 src=tst_data,
                                 include_tests=table[u"include-tests"])

    # Optional replacement data overrides the collected reference samples.
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    # NOTE(review): create_new_list flips after the very
                    # first test, so only that test's ref-data is cleared;
                    # all other tests get replacement samples appended to
                    # their original data — confirm this is intended.
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # The same collection pass for the "compare" data set.
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement data overrides the collected compare samples
    # (same single-reset caveat as for the reference replacement above).
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect historical results, but only for tests already in tbl_dict.
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        pass

    # Build the table rows: mean/stdev are converted to Mpps (/ 1e6,
    # assuming samples are in pps — per the [Mpps] header).
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        # item[-2] is the compare mean, item[-4] the reference mean.
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        #     footnote = True
        elif item[-4] != 0:
            # Delta [%], truncated to an integer.
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only complete rows with a tested compare result.
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # footnote is currently never set True (the setting branch is commented
    # out above), so this block is effectively dead.
    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
681
682
683 def table_perf_comparison_nic(table, input_data):
684     """Generate the table(s) with algorithm: table_perf_comparison
685     specified in the specification file.
686
687     :param table: Table to generate.
688     :param input_data: Data to process.
689     :type table: pandas.Series
690     :type input_data: InputData
691     """
692
693     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
694
695     # Transform the data
696     logging.info(
697         f"    Creating the data set for the {table.get(u'type', u'')} "
698         f"{table.get(u'title', u'')}."
699     )
700     data = input_data.filter_data(table, continue_on_error=True)
701
702     # Prepare the header of the tables
703     try:
704         header = [u"Test case", ]
705
706         if table[u"include-tests"] == u"MRR":
707             hdr_param = u"Rec Rate"
708         else:
709             hdr_param = u"Thput"
710
711         history = table.get(u"history", list())
712         for item in history:
713             header.extend(
714                 [
715                     f"{item[u'title']} {hdr_param} [Mpps]",
716                     f"{item[u'title']} Stdev [Mpps]"
717                 ]
718             )
719         header.extend(
720             [
721                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
722                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
723                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
724                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
725                 u"Delta [%]"
726             ]
727         )
728         header_str = u",".join(header) + u"\n"
729     except (AttributeError, KeyError) as err:
730         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
731         return
732
733     # Prepare data to the table:
734     tbl_dict = dict()
735     # topo = u""
736     for job, builds in table[u"reference"][u"data"].items():
737         # topo = u"2n-skx" if u"2n-skx" in job else u""
738         for build in builds:
739             for tst_name, tst_data in data[job][str(build)].items():
740                 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
741                     continue
742                 tst_name_mod = _tpc_modify_test_name(tst_name)
743                 if u"across topologies" in table[u"title"].lower():
744                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
745                 if tbl_dict.get(tst_name_mod, None) is None:
746                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
747                     if u"across testbeds" in table[u"title"].lower() or \
748                             u"across topologies" in table[u"title"].lower():
749                         name = _tpc_modify_displayed_test_name(name)
750                     tbl_dict[tst_name_mod] = {
751                         u"name": name,
752                         u"ref-data": list(),
753                         u"cmp-data": list()
754                     }
755                 _tpc_insert_data(
756                     target=tbl_dict[tst_name_mod][u"ref-data"],
757                     src=tst_data,
758                     include_tests=table[u"include-tests"]
759                 )
760
761     replacement = table[u"reference"].get(u"data-replacement", None)
762     if replacement:
763         create_new_list = True
764         rpl_data = input_data.filter_data(
765             table, data=replacement, continue_on_error=True)
766         for job, builds in replacement.items():
767             for build in builds:
768                 for tst_name, tst_data in rpl_data[job][str(build)].items():
769                     if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
770                         continue
771                     tst_name_mod = _tpc_modify_test_name(tst_name)
772                     if u"across topologies" in table[u"title"].lower():
773                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
774                     if tbl_dict.get(tst_name_mod, None) is None:
775                         name = \
776                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
777                         if u"across testbeds" in table[u"title"].lower() or \
778                                 u"across topologies" in table[u"title"].lower():
779                             name = _tpc_modify_displayed_test_name(name)
780                         tbl_dict[tst_name_mod] = {
781                             u"name": name,
782                             u"ref-data": list(),
783                             u"cmp-data": list()
784                         }
785                     if create_new_list:
786                         create_new_list = False
787                         tbl_dict[tst_name_mod][u"ref-data"] = list()
788
789                     _tpc_insert_data(
790                         target=tbl_dict[tst_name_mod][u"ref-data"],
791                         src=tst_data,
792                         include_tests=table[u"include-tests"]
793                     )
794
795     for job, builds in table[u"compare"][u"data"].items():
796         for build in builds:
797             for tst_name, tst_data in data[job][str(build)].items():
798                 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
799                     continue
800                 tst_name_mod = _tpc_modify_test_name(tst_name)
801                 if u"across topologies" in table[u"title"].lower():
802                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
803                 if tbl_dict.get(tst_name_mod, None) is None:
804                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
805                     if u"across testbeds" in table[u"title"].lower() or \
806                             u"across topologies" in table[u"title"].lower():
807                         name = _tpc_modify_displayed_test_name(name)
808                     tbl_dict[tst_name_mod] = {
809                         u"name": name,
810                         u"ref-data": list(),
811                         u"cmp-data": list()
812                     }
813                 _tpc_insert_data(
814                     target=tbl_dict[tst_name_mod][u"cmp-data"],
815                     src=tst_data,
816                     include_tests=table[u"include-tests"]
817                 )
818
819     replacement = table[u"compare"].get(u"data-replacement", None)
820     if replacement:
821         create_new_list = True
822         rpl_data = input_data.filter_data(
823             table, data=replacement, continue_on_error=True)
824         for job, builds in replacement.items():
825             for build in builds:
826                 for tst_name, tst_data in rpl_data[job][str(build)].items():
827                     if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
828                         continue
829                     tst_name_mod = _tpc_modify_test_name(tst_name)
830                     if u"across topologies" in table[u"title"].lower():
831                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
832                     if tbl_dict.get(tst_name_mod, None) is None:
833                         name = \
834                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
835                         if u"across testbeds" in table[u"title"].lower() or \
836                                 u"across topologies" in table[u"title"].lower():
837                             name = _tpc_modify_displayed_test_name(name)
838                         tbl_dict[tst_name_mod] = {
839                             u"name": name,
840                             u"ref-data": list(),
841                             u"cmp-data": list()
842                         }
843                     if create_new_list:
844                         create_new_list = False
845                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
846
847                     _tpc_insert_data(
848                         target=tbl_dict[tst_name_mod][u"cmp-data"],
849                         src=tst_data,
850                         include_tests=table[u"include-tests"]
851                     )
852
853     for item in history:
854         for job, builds in item[u"data"].items():
855             for build in builds:
856                 for tst_name, tst_data in data[job][str(build)].items():
857                     if item[u"nic"] not in tst_data[u"tags"]:
858                         continue
859                     tst_name_mod = _tpc_modify_test_name(tst_name)
860                     if u"across topologies" in table[u"title"].lower():
861                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
862                     if tbl_dict.get(tst_name_mod, None) is None:
863                         continue
864                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
865                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
866                     if tbl_dict[tst_name_mod][u"history"].\
867                             get(item[u"title"], None) is None:
868                         tbl_dict[tst_name_mod][u"history"][item[
869                             u"title"]] = list()
870                     try:
871                         if table[u"include-tests"] == u"MRR":
872                             res = tst_data[u"result"][u"receive-rate"]
873                         elif table[u"include-tests"] == u"PDR":
874                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
875                         elif table[u"include-tests"] == u"NDR":
876                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
877                         else:
878                             continue
879                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
880                             append(res)
881                     except (TypeError, KeyError):
882                         pass
883
884     tbl_lst = list()
885     footnote = False
886     for tst_name in tbl_dict:
887         item = [tbl_dict[tst_name][u"name"], ]
888         if history:
889             if tbl_dict[tst_name].get(u"history", None) is not None:
890                 for hist_data in tbl_dict[tst_name][u"history"].values():
891                     if hist_data:
892                         item.append(round(mean(hist_data) / 1000000, 2))
893                         item.append(round(stdev(hist_data) / 1000000, 2))
894                     else:
895                         item.extend([u"Not tested", u"Not tested"])
896             else:
897                 item.extend([u"Not tested", u"Not tested"])
898         data_t = tbl_dict[tst_name][u"ref-data"]
899         if data_t:
900             item.append(round(mean(data_t) / 1000000, 2))
901             item.append(round(stdev(data_t) / 1000000, 2))
902         else:
903             item.extend([u"Not tested", u"Not tested"])
904         data_t = tbl_dict[tst_name][u"cmp-data"]
905         if data_t:
906             item.append(round(mean(data_t) / 1000000, 2))
907             item.append(round(stdev(data_t) / 1000000, 2))
908         else:
909             item.extend([u"Not tested", u"Not tested"])
910         if item[-2] == u"Not tested":
911             pass
912         elif item[-4] == u"Not tested":
913             item.append(u"New in CSIT-2001")
914         # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
915         #     item.append(u"See footnote [1]")
916         #     footnote = True
917         elif item[-4] != 0:
918             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
919         if (len(item) == len(header)) and (item[-3] != u"Not tested"):
920             tbl_lst.append(item)
921
922     tbl_lst = _tpc_sort_table(tbl_lst)
923
924     # Generate csv tables:
925     csv_file = f"{table[u'output-file']}.csv"
926     with open(csv_file, u"wt") as file_handler:
927         file_handler.write(header_str)
928         for test in tbl_lst:
929             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
930
931     txt_file_name = f"{table[u'output-file']}.txt"
932     convert_csv_to_pretty_txt(csv_file, txt_file_name)
933
934     if footnote:
935         with open(txt_file_name, u'a') as txt_file:
936             txt_file.writelines([
937                 u"\nFootnotes:\n",
938                 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
939                 u"2-node testbeds, dot1q encapsulation is now used on both "
940                 u"links of SUT.\n",
941                 u"    Previously dot1q was used only on a single link with the "
942                 u"other link carrying untagged Ethernet frames. This changes "
943                 u"results\n",
944                 u"    in slightly lower throughput in CSIT-1908 for these "
945                 u"tests. See release notes."
946             ])
947
948     # Generate html table:
949     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
950
951
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Collects results of the same tests run on two different NICs
    (reference vs compare), aggregates them per test and writes the
    comparison as csv, pretty-txt and html tables.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Build the table header; a missing specification key aborts the table.
    try:
        hdr_param = \
            u"Rec Rate" if table[u"include-tests"] == u"MRR" else u"Thput"
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]"
        ]
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Collect per-test samples, split by NIC tag into ref-data / cmp-data:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                entry = tbl_dict.get(tst_name_mod, None)
                if entry is None:
                    entry = {
                        u"name": u"-".join(tst_data[u"name"].split(u"-")[:-1]),
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                    tbl_dict[tst_name_mod] = entry
                try:
                    include = table[u"include-tests"]
                    if include == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif include == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif include == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        continue

                    if result and \
                            table[u"reference"][u"nic"] in tst_data[u"tags"]:
                        entry[u"ref-data"].append(result)
                    elif result and \
                            table[u"compare"][u"nic"] in tst_data[u"tags"]:
                        entry[u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    # Assemble rows: name, ref mean/stdev, cmp mean/stdev, delta [%].
    tbl_lst = list()
    for tst_entry in tbl_dict.values():
        row = [tst_entry[u"name"], ]
        for data_t in (tst_entry[u"ref-data"], tst_entry[u"cmp-data"]):
            if data_t:
                row.append(round(mean(data_t) / 1000000, 2))
                row.append(round(stdev(data_t) / 1000000, 2))
            else:
                row.extend([None, None])
        # Delta only when both means exist and the reference is non-zero;
        # rows without a delta are dropped by the length check below.
        if row[-4] is not None and row[-2] is not None and row[-4] != 0:
            row.append(int(relative_change(float(row[-4]), float(row[-2]))))
        if len(row) == len(header):
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
                              f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1062
1063
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Pairs SOAK test results (compare side) with NDR/PDR/MRR results of
    the corresponding tests (reference side) and writes the comparison
    as csv, pretty-txt and html tables.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]", u"Stdev of delta [%]"
        ]
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    # Strip the u"-soak" suffix so the name can be matched
                    # against the reference test names below.
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        # Prefix the displayed name with the NIC extracted
                        # from tst_data[u"parent"].
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        # No throughput data for this test in this build.
                        pass
    # View of all SOAK test names collected above; only these names are
    # matched in the reference pass below.
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                # Strip run-type suffixes so the name matches the SOAK key.
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    # Assemble rows: name, ref mean/stdev, cmp mean/stdev, delta, d_stdev.
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # A row is emitted only when both sides have data.
        # NOTE(review): truthiness check means a mean of exactly 0.0 is
        # treated the same as missing data - confirm this is intended.
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1195
1196
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    Computes per-test trend statistics (trend value, short/long term
    relative change, regression/progression counts) from MRR receive
    rates and writes them as a csv file plus a pretty-txt rendering.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Collect receive-rate samples per test, keyed by build number:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Tests whose parent name carries no NIC are skipped.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        if len(data_t) < 2:
            # At least two samples are needed to evaluate a trend.
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        # Maximum trend average in the long-term window, excluding the
        # short-term window and NaN samples; nan when nothing qualifies.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last trend average vs the average at the
        # start of the short-term window:
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        # Long-term change: last trend average vs the long-term maximum:
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip tests with incomplete statistics. (The former extra
            # check for both relative changes being NaN was redundant:
            # it is fully covered by this or-condition.)
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Bucket rows by regressions (column 4, primary key) and progressions
    # (column 5, secondary key), both descending; each bucket is ordered
    # by short-term change (column 2) ascending:
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1316
1317
1318 def _generate_url(testbed, test_name):
1319     """Generate URL to a trending plot from the name of the test case.
1320
1321     :param testbed: The testbed used for testing.
1322     :param test_name: The name of the test case.
1323     :type testbed: str
1324     :type test_name: str
1325     :returns: The URL to the plot with the trending data for the given test
1326         case.
1327     :rtype str
1328     """
1329
1330     if u"x520" in test_name:
1331         nic = u"x520"
1332     elif u"x710" in test_name:
1333         nic = u"x710"
1334     elif u"xl710" in test_name:
1335         nic = u"xl710"
1336     elif u"xxv710" in test_name:
1337         nic = u"xxv710"
1338     elif u"vic1227" in test_name:
1339         nic = u"vic1227"
1340     elif u"vic1385" in test_name:
1341         nic = u"vic1385"
1342     elif u"x553" in test_name:
1343         nic = u"x553"
1344     else:
1345         nic = u""
1346
1347     if u"64b" in test_name:
1348         frame_size = u"64b"
1349     elif u"78b" in test_name:
1350         frame_size = u"78b"
1351     elif u"imix" in test_name:
1352         frame_size = u"imix"
1353     elif u"9000b" in test_name:
1354         frame_size = u"9000b"
1355     elif u"1518b" in test_name:
1356         frame_size = u"1518b"
1357     elif u"114b" in test_name:
1358         frame_size = u"114b"
1359     else:
1360         frame_size = u""
1361
1362     if u"1t1c" in test_name or \
1363         (u"-1c-" in test_name and
1364          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1365         cores = u"1t1c"
1366     elif u"2t2c" in test_name or \
1367          (u"-2c-" in test_name and
1368           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1369         cores = u"2t2c"
1370     elif u"4t4c" in test_name or \
1371          (u"-4c-" in test_name and
1372           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1373         cores = u"4t4c"
1374     elif u"2t1c" in test_name or \
1375          (u"-1c-" in test_name and
1376           testbed in (u"2n-skx", u"3n-skx")):
1377         cores = u"2t1c"
1378     elif u"4t2c" in test_name:
1379         cores = u"4t2c"
1380     elif u"8t4c" in test_name:
1381         cores = u"8t4c"
1382     else:
1383         cores = u""
1384
1385     if u"testpmd" in test_name:
1386         driver = u"testpmd"
1387     elif u"l3fwd" in test_name:
1388         driver = u"l3fwd"
1389     elif u"avf" in test_name:
1390         driver = u"avf"
1391     elif u"rdma" in test_name:
1392         driver = u"rdma"
1393     elif u"dnv" in testbed or u"tsh" in testbed:
1394         driver = u"ixgbe"
1395     else:
1396         driver = u"i40e"
1397
1398     if u"acl" in test_name or \
1399             u"macip" in test_name or \
1400             u"nat" in test_name or \
1401             u"policer" in test_name or \
1402             u"cop" in test_name:
1403         bsf = u"features"
1404     elif u"scale" in test_name:
1405         bsf = u"scale"
1406     elif u"base" in test_name:
1407         bsf = u"base"
1408     else:
1409         bsf = u"base"
1410
1411     if u"114b" in test_name and u"vhost" in test_name:
1412         domain = u"vts"
1413     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1414         domain = u"dpdk"
1415     elif u"memif" in test_name:
1416         domain = u"container_memif"
1417     elif u"srv6" in test_name:
1418         domain = u"srv6"
1419     elif u"vhost" in test_name:
1420         domain = u"vhost"
1421         if u"vppl2xc" in test_name:
1422             driver += u"-vpp"
1423         else:
1424             driver += u"-testpmd"
1425         if u"lbvpplacp" in test_name:
1426             bsf += u"-link-bonding"
1427     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1428         domain = u"nf_service_density_vnfc"
1429     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1430         domain = u"nf_service_density_cnfc"
1431     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1432         domain = u"nf_service_density_cnfp"
1433     elif u"ipsec" in test_name:
1434         domain = u"ipsec"
1435         if u"sw" in test_name:
1436             bsf += u"-sw"
1437         elif u"hw" in test_name:
1438             bsf += u"-hw"
1439     elif u"ethip4vxlan" in test_name:
1440         domain = u"ip4_tunnels"
1441     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1442         domain = u"ip4"
1443     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1444         domain = u"ip6"
1445     elif u"l2xcbase" in test_name or \
1446             u"l2xcscale" in test_name or \
1447             u"l2bdbasemaclrn" in test_name or \
1448             u"l2bdscale" in test_name or \
1449             u"l2patch" in test_name:
1450         domain = u"l2"
1451     else:
1452         domain = u""
1453
1454     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1455     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1456
1457     return file_name + anchor_name
1458
1459
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the csv dashboard produced earlier and renders it as an html
    table embedded in a reStructuredText "raw" directive.

    :param table: Table to generate.
    :param input_data: Data to process (unused here).
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header (first csv row); only the first column is left-aligned:
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        thead = ET.SubElement(
            trow, u"th",
            attrib=dict(align=u"left" if idx == 0 else u"center")
        )
        thead.text = item

    # Light/dark shade pairs for alternating row backgrounds:
    colors = {
        u"regression": (u"#ffcccc", u"#ff9999"),
        u"progression": (u"#c6ecc6", u"#9fdf9f"),
        u"normal": (u"#e9f1fb", u"#d4e4f7")
    }
    for r_idx, row in enumerate(csv_lst[1:]):
        # Regressions (column 4) take precedence over progressions
        # (column 5) when picking the row colour:
        if int(row[4]):
            shades = colors[u"regression"]
        elif int(row[5]):
            shades = colors[u"progression"]
        else:
            shades = colors[u"normal"]
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=shades[r_idx % 2])
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            if c_idx == 0:
                # First column: link the test name to its trending plot.
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), item)}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1560
1561
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each configured job/build it writes the build number, the VPP
    version, pass/fail counts and the list of failed test names
    (prefixed with the NIC) as plain lines into the output file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    out_lines = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            failed_tests = list()
            nr_passed = 0
            nr_failed = 0
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    nr_passed += 1
                    continue
                nr_failed += 1
                nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                if not nic_match:
                    # No NIC in the parent suite name; skip the test.
                    continue
                failed_tests.append(
                    f"{nic_match.group(0)}-{tst_data[u'name']}"
                )
            out_lines.append(build)
            out_lines.append(version)
            out_lines.append(str(nr_passed))
            out_lines.append(str(nr_failed))
            out_lines.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(test + u'\n' for test in out_lines)
1622
1623
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Collects per-test failure counts over a configurable time window
    and writes them as CSV (plus a pretty-printed .txt version),
    ordered by number of failures, then by last failure date.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification. Only builds generated within the time window
    # (default 7 days) are taken into account.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not nic_match:
                        # No NIC in the parent suite name; skip the test.
                        continue
                    tbl_dict[tst_name] = {
                        u"name": f"{nic_match.group(0)}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        # Remember the count and the details of the most recent failure.
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date, fails_last_vpp, fails_last_csit = val[1:]
        if fails_nr:
            max_fails = max(max_fails, fails_nr)
            tbl_lst.append([
                tst_data[u"name"],
                fails_nr,
                fails_last_date,
                fails_last_vpp,
                f"mrr-daily-build-{fails_last_csit}"
            ])

    # Sort by date (descending), then group by failure count (descending):
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_sorted.extend(item for item in tbl_lst if item[1] == nrf)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join(str(item) for item in test) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1731
1732
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the failed-tests CSV, renders it as an HTML table (first
    column linked to the trending graphs) and writes it wrapped in an
    rST ``.. raw:: html`` directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Root <table> element:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row built from the first CSV line:
    header_row = ET.SubElement(
        failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for col, caption in enumerate(rows[0]):
        cell = ET.SubElement(
            header_row,
            u"th",
            attrib=dict(align=u"left" if col == 0 else u"center")
        )
        cell.text = caption

    # Data rows with alternating background shades:
    colors = (u"#e9f1fb", u"#d4e4f7")
    for row_nr, row in enumerate(rows[1:]):
        body_row = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=colors[row_nr % 2])
        )

        for col, value in enumerate(row):
            cell = ET.SubElement(
                body_row,
                u"td",
                attrib=dict(align=u"left" if col == 0 else u"center")
            )
            if col == 0:
                # The test name links to its trending graph.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                link.text = value
            else:
                cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return