Trending: Add cx556a to urls
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34     convert_csv_to_pretty_txt, relative_change_stdev
35
36
37 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
38
39
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table specification names an algorithm; the matching generator
    function is looked up in a dispatch table and called with the table
    specification and the input data. An unknown algorithm is logged and
    skipped so the remaining tables are still generated.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    generator = {
        u"table_details": table_details,
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            # A missing key raises KeyError, not NameError: the dict
            # values were already resolved when the literal was built,
            # so only the lookup itself can fail here.
            gen_func = generator[table[u"algorithm"]]
        except KeyError as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
            continue
        gen_func(table, data)
    logging.info(u"Done.")
73
74
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    One CSV file is written per suite found in the chosen build; each row
    holds the fields listed in the table's ``columns`` specification for
    one test of that suite. Fields are CSV-quoted by doubling embedded
    double quotes and wrapping the value in double quotes.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    # Generate the data for the table according to the model in the table
    # specification
    # Only the first job and its first build are used as the data source.
    job = list(table[u"data"].keys())[0]
    build = str(table[u"data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error(
            u"    No data available. The table will not be generated."
        )
        return

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data[job][build].keys():
            # Keep only tests belonging to this suite.
            if data[job][build][test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # The column "data" spec looks like "<source> <field>";
                    # the second token selects the field in the test record.
                    col_data = str(data[job][build][test][column[
                        u"data"].split(" ")[1]]).replace(u'"', u'""')
                    if column[u"data"].split(u" ")[1] in \
                        (u"conf-history", u"show-run"):
                        # Strip the first " |br| " and wrap the value in
                        # |prein|/|preout| markers; [:-5] drops the trailing
                        # 5 characters — presumably a trailing "|br| "
                        # marker; TODO confirm against the data source.
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    # Field missing in the test record.
                    row_lst.append(u"No data")
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = (
                f"{table[u'output-file']}_{suite_name}"
                f"{table[u'output-file-ext']}"
            )
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
147
148
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Like ``table_details`` but the data of all jobs/builds selected by the
    specification is merged first, so a test appears once with its merged
    record. One CSV file is written per merged suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            # Keep only tests belonging to this suite.
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # The column "data" spec looks like "<source> <field>";
                    # the second token selects the field in the test record.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # The trailing spaces in the replacement are deliberate
                    # (column padding in the rendered output) — keep as is.
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in \
                        (u"conf-history", u"show-run"):
                        # Strip the first " |br| " and wrap the value in
                        # |prein|/|preout| markers; [:-5] drops the trailing
                        # 5 characters — presumably a trailing "|br| "
                        # marker; TODO confirm against the data source.
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = (
                f"{table[u'output-file']}_{suite_name}"
                f"{table[u'output-file-ext']}"
            )
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
221
222
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Drops the test-type suffix (ndrpdr, mrr, ... variants), collapses the
    thread/core tag (e.g. "2t1c") into the core count only (e.g. "1c")
    and finally strips the NIC designation matched by ``REGEX_NIC``.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Order matters: longer suffixes must be replaced before their
    # shorter prefixes (e.g. "-ndrpdr" before "-ndr").
    substitutions = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c"),
    )
    for old, new in substitutions:
        test_name = test_name.replace(old, new)

    return re.sub(REGEX_NIC, u"", test_name)
246
247
248 def _tpc_modify_displayed_test_name(test_name):
249     """Modify a test name which is displayed in a table by replacing its parts.
250
251     :param test_name: Test name to be modified.
252     :type test_name: str
253     :returns: Modified test name.
254     :rtype: str
255     """
256     return test_name.\
257         replace(u"1t1c", u"1c").\
258         replace(u"2t1c", u"1c"). \
259         replace(u"2t2c", u"2c").\
260         replace(u"4t2c", u"2c"). \
261         replace(u"4t4c", u"4c").\
262         replace(u"8t4c", u"4c")
263
264
265 def _tpc_insert_data(target, src, include_tests):
266     """Insert src data to the target structure.
267
268     :param target: Target structure where the data is placed.
269     :param src: Source data to be placed into the target stucture.
270     :param include_tests: Which results will be included (MRR, NDR, PDR).
271     :type target: list
272     :type src: dict
273     :type include_tests: str
274     """
275     try:
276         if include_tests == u"MRR":
277             target.append(src[u"result"][u"receive-rate"])
278         elif include_tests == u"PDR":
279             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
280         elif include_tests == u"NDR":
281             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
282     except (KeyError, TypeError):
283         pass
284
285
286 def _tpc_sort_table(table):
287     """Sort the table this way:
288
289     1. Put "New in CSIT-XXXX" at the first place.
290     2. Put "See footnote" at the second place.
291     3. Sort the rest by "Delta".
292
293     :param table: Table to sort.
294     :type table: list
295     :returns: Sorted table.
296     :rtype: list
297     """
298
299
300     tbl_new = list()
301     tbl_see = list()
302     tbl_delta = list()
303     for item in table:
304         if isinstance(item[-1], str):
305             if u"New in CSIT" in item[-1]:
306                 tbl_new.append(item)
307             elif u"See footnote" in item[-1]:
308                 tbl_see.append(item)
309         else:
310             tbl_delta.append(item)
311
312     # Sort the tables:
313     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
314     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
315     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
316     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
317
318     # Put the tables together:
319     table = list()
320     table.extend(tbl_new)
321     table.extend(tbl_see)
322     table.extend(tbl_delta)
323
324     return table
325
326
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    One plotly Table trace is created per pre-sorted ordering (each column
    ascending and descending); a dropdown menu toggles trace visibility so
    exactly one ordering is shown at a time.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    df_data = pd.DataFrame(data, columns=header)

    # One DataFrame per sort order: first every column ascending, then
    # every column descending; the first (test name) column is used as a
    # secondary sort key. NOTE(review): the conditional binds to the
    # `ascending` argument, so for the first column the ascending list is
    # inverted — confirm this asymmetry is intended.
    df_sorted = [df_data.sort_values(
        by=[key, header[0]], ascending=[True, True]
        if key != header[0] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[0]], ascending=[False, True]
        if key != header[0] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Alternating row background colours (zebra striping).
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]
    )

    fig = go.Figure()

    # Add one (initially stacked) table trace per sort order.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
        fig.add_trace(
            go.Table(
                columnwidth=[30, 10],
                header=table_header,
                cells=dict(
                    values=columns,
                    fill_color=fill_color,
                    align=[u"left", u"right"]
                )
            )
        )

    # Dropdown buttons: each makes exactly one trace visible.
    buttons = list()
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
        visible[idx] = True
        buttons.append(
            dict(
                label=hdr.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visible}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.03,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                # Default selection: the last menu entry, i.e. the last
                # column sorted descending.
                active=len(menu_items) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
418
419
def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    For every test it collects "reference" and "compare" result samples
    (optionally overridden by per-side "data-replacement" data and extended
    by optional "history" columns), computes mean/stdev in Mpps and the
    relative delta [%], and writes the result as CSV, pretty TXT and a
    sortable HTML table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        # Optional historical releases: two columns (mean, stdev) each.
        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    # tbl_dict maps the normalised test name to its display name and the
    # collected reference/compare result samples.
    tbl_dict = dict()
    # topo = ""
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name: NIC prefix + test name without its
                    # last (test type) component.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 src=tst_data,
                                 include_tests=table[u"include-tests"])

    # Optional replacement data for the reference side.
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    # NOTE(review): the flag flips after the first test, so
                    # only that one test's ref-data is cleared before the
                    # replacement values are appended; all other tests keep
                    # their original samples as well — confirm intended.
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement data for the compare side (same logic and same
    # first-test-only clearing quirk as for the reference side above).
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect historical results; only tests already present in tbl_dict
    # get history columns.
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        # Result missing or malformed: best-effort, skip.
                        pass

    # Build the table rows; means/stdevs are converted to Mpps.
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        # item[-2]: compare mean, item[-4]: reference mean.
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        #     footnote = True
        elif item[-4] != 0:
            # Non-zero reference guards relative_change against div-by-zero.
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only complete rows whose compare side was actually tested.
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # NOTE(review): footnote is never set True — the only assignment is in
    # the commented-out branch above — so this block is currently dead.
    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
681
682
683 def table_perf_comparison_nic(table, input_data):
684     """Generate the table(s) with algorithm: table_perf_comparison
685     specified in the specification file.
686
687     :param table: Table to generate.
688     :param input_data: Data to process.
689     :type table: pandas.Series
690     :type input_data: InputData
691     """
692
693     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
694
695     # Transform the data
696     logging.info(
697         f"    Creating the data set for the {table.get(u'type', u'')} "
698         f"{table.get(u'title', u'')}."
699     )
700     data = input_data.filter_data(table, continue_on_error=True)
701
702     # Prepare the header of the tables
703     try:
704         header = [u"Test case", ]
705
706         if table[u"include-tests"] == u"MRR":
707             hdr_param = u"Rec Rate"
708         else:
709             hdr_param = u"Thput"
710
711         history = table.get(u"history", list())
712         for item in history:
713             header.extend(
714                 [
715                     f"{item[u'title']} {hdr_param} [Mpps]",
716                     f"{item[u'title']} Stdev [Mpps]"
717                 ]
718             )
719         header.extend(
720             [
721                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
722                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
723                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
724                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
725                 u"Delta [%]"
726             ]
727         )
728         header_str = u",".join(header) + u"\n"
729     except (AttributeError, KeyError) as err:
730         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
731         return
732
733     # Prepare data to the table:
734     tbl_dict = dict()
735     # topo = u""
736     for job, builds in table[u"reference"][u"data"].items():
737         # topo = u"2n-skx" if u"2n-skx" in job else u""
738         for build in builds:
739             for tst_name, tst_data in data[job][str(build)].items():
740                 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
741                     continue
742                 tst_name_mod = _tpc_modify_test_name(tst_name)
743                 if u"across topologies" in table[u"title"].lower():
744                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
745                 if tbl_dict.get(tst_name_mod, None) is None:
746                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
747                     if u"across testbeds" in table[u"title"].lower() or \
748                             u"across topologies" in table[u"title"].lower():
749                         name = _tpc_modify_displayed_test_name(name)
750                     tbl_dict[tst_name_mod] = {
751                         u"name": name,
752                         u"ref-data": list(),
753                         u"cmp-data": list()
754                     }
755                 _tpc_insert_data(
756                     target=tbl_dict[tst_name_mod][u"ref-data"],
757                     src=tst_data,
758                     include_tests=table[u"include-tests"]
759                 )
760
761     replacement = table[u"reference"].get(u"data-replacement", None)
762     if replacement:
763         create_new_list = True
764         rpl_data = input_data.filter_data(
765             table, data=replacement, continue_on_error=True)
766         for job, builds in replacement.items():
767             for build in builds:
768                 for tst_name, tst_data in rpl_data[job][str(build)].items():
769                     if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
770                         continue
771                     tst_name_mod = _tpc_modify_test_name(tst_name)
772                     if u"across topologies" in table[u"title"].lower():
773                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
774                     if tbl_dict.get(tst_name_mod, None) is None:
775                         name = \
776                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
777                         if u"across testbeds" in table[u"title"].lower() or \
778                                 u"across topologies" in table[u"title"].lower():
779                             name = _tpc_modify_displayed_test_name(name)
780                         tbl_dict[tst_name_mod] = {
781                             u"name": name,
782                             u"ref-data": list(),
783                             u"cmp-data": list()
784                         }
785                     if create_new_list:
786                         create_new_list = False
787                         tbl_dict[tst_name_mod][u"ref-data"] = list()
788
789                     _tpc_insert_data(
790                         target=tbl_dict[tst_name_mod][u"ref-data"],
791                         src=tst_data,
792                         include_tests=table[u"include-tests"]
793                     )
794
795     for job, builds in table[u"compare"][u"data"].items():
796         for build in builds:
797             for tst_name, tst_data in data[job][str(build)].items():
798                 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
799                     continue
800                 tst_name_mod = _tpc_modify_test_name(tst_name)
801                 if u"across topologies" in table[u"title"].lower():
802                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
803                 if tbl_dict.get(tst_name_mod, None) is None:
804                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
805                     if u"across testbeds" in table[u"title"].lower() or \
806                             u"across topologies" in table[u"title"].lower():
807                         name = _tpc_modify_displayed_test_name(name)
808                     tbl_dict[tst_name_mod] = {
809                         u"name": name,
810                         u"ref-data": list(),
811                         u"cmp-data": list()
812                     }
813                 _tpc_insert_data(
814                     target=tbl_dict[tst_name_mod][u"cmp-data"],
815                     src=tst_data,
816                     include_tests=table[u"include-tests"]
817                 )
818
819     replacement = table[u"compare"].get(u"data-replacement", None)
820     if replacement:
821         create_new_list = True
822         rpl_data = input_data.filter_data(
823             table, data=replacement, continue_on_error=True)
824         for job, builds in replacement.items():
825             for build in builds:
826                 for tst_name, tst_data in rpl_data[job][str(build)].items():
827                     if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
828                         continue
829                     tst_name_mod = _tpc_modify_test_name(tst_name)
830                     if u"across topologies" in table[u"title"].lower():
831                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
832                     if tbl_dict.get(tst_name_mod, None) is None:
833                         name = \
834                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
835                         if u"across testbeds" in table[u"title"].lower() or \
836                                 u"across topologies" in table[u"title"].lower():
837                             name = _tpc_modify_displayed_test_name(name)
838                         tbl_dict[tst_name_mod] = {
839                             u"name": name,
840                             u"ref-data": list(),
841                             u"cmp-data": list()
842                         }
843                     if create_new_list:
844                         create_new_list = False
845                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
846
847                     _tpc_insert_data(
848                         target=tbl_dict[tst_name_mod][u"cmp-data"],
849                         src=tst_data,
850                         include_tests=table[u"include-tests"]
851                     )
852
853     for item in history:
854         for job, builds in item[u"data"].items():
855             for build in builds:
856                 for tst_name, tst_data in data[job][str(build)].items():
857                     if item[u"nic"] not in tst_data[u"tags"]:
858                         continue
859                     tst_name_mod = _tpc_modify_test_name(tst_name)
860                     if u"across topologies" in table[u"title"].lower():
861                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
862                     if tbl_dict.get(tst_name_mod, None) is None:
863                         continue
864                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
865                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
866                     if tbl_dict[tst_name_mod][u"history"].\
867                             get(item[u"title"], None) is None:
868                         tbl_dict[tst_name_mod][u"history"][item[
869                             u"title"]] = list()
870                     try:
871                         if table[u"include-tests"] == u"MRR":
872                             res = tst_data[u"result"][u"receive-rate"]
873                         elif table[u"include-tests"] == u"PDR":
874                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
875                         elif table[u"include-tests"] == u"NDR":
876                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
877                         else:
878                             continue
879                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
880                             append(res)
881                     except (TypeError, KeyError):
882                         pass
883
884     tbl_lst = list()
885     footnote = False
886     for tst_name in tbl_dict:
887         item = [tbl_dict[tst_name][u"name"], ]
888         if history:
889             if tbl_dict[tst_name].get(u"history", None) is not None:
890                 for hist_data in tbl_dict[tst_name][u"history"].values():
891                     if hist_data:
892                         item.append(round(mean(hist_data) / 1000000, 2))
893                         item.append(round(stdev(hist_data) / 1000000, 2))
894                     else:
895                         item.extend([u"Not tested", u"Not tested"])
896             else:
897                 item.extend([u"Not tested", u"Not tested"])
898         data_t = tbl_dict[tst_name][u"ref-data"]
899         if data_t:
900             item.append(round(mean(data_t) / 1000000, 2))
901             item.append(round(stdev(data_t) / 1000000, 2))
902         else:
903             item.extend([u"Not tested", u"Not tested"])
904         data_t = tbl_dict[tst_name][u"cmp-data"]
905         if data_t:
906             item.append(round(mean(data_t) / 1000000, 2))
907             item.append(round(stdev(data_t) / 1000000, 2))
908         else:
909             item.extend([u"Not tested", u"Not tested"])
910         if item[-2] == u"Not tested":
911             pass
912         elif item[-4] == u"Not tested":
913             item.append(u"New in CSIT-2001")
914         # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
915         #     item.append(u"See footnote [1]")
916         #     footnote = True
917         elif item[-4] != 0:
918             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
919         if (len(item) == len(header)) and (item[-3] != u"Not tested"):
920             tbl_lst.append(item)
921
922     tbl_lst = _tpc_sort_table(tbl_lst)
923
924     # Generate csv tables:
925     csv_file = f"{table[u'output-file']}.csv"
926     with open(csv_file, u"wt") as file_handler:
927         file_handler.write(header_str)
928         for test in tbl_lst:
929             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
930
931     txt_file_name = f"{table[u'output-file']}.txt"
932     convert_csv_to_pretty_txt(csv_file, txt_file_name)
933
934     if footnote:
935         with open(txt_file_name, u'a') as txt_file:
936             txt_file.writelines([
937                 u"\nFootnotes:\n",
938                 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
939                 u"2-node testbeds, dot1q encapsulation is now used on both "
940                 u"links of SUT.\n",
941                 u"    Previously dot1q was used only on a single link with the "
942                 u"other link carrying untagged Ethernet frames. This changes "
943                 u"results\n",
944                 u"    in slightly lower throughput in CSIT-1908 for these "
945                 u"tests. See release notes."
946             ])
947
948     # Generate html table:
949     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
950
951
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Build the header of the tables; a missing specification key aborts
    # the table generation.
    try:
        hdr_param = \
            u"Rec Rate" if table[u"include-tests"] == u"MRR" else u"Thput"
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]"
        ]
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Collect reference and compare samples per (modified) test name.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                entry = tbl_dict.get(tst_name_mod, None)
                if entry is None:
                    entry = {
                        u"name": u"-".join(tst_data[u"name"].split(u"-")[:-1]),
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                    tbl_dict[tst_name_mod] = entry
                try:
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        continue

                    # The NIC tag decides which column the sample feeds.
                    if result:
                        if table[u"reference"][u"nic"] in tst_data[u"tags"]:
                            entry[u"ref-data"].append(result)
                        elif table[u"compare"][u"nic"] in tst_data[u"tags"]:
                            entry[u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    # Assemble rows: name, ref mean/stdev, cmp mean/stdev, delta.
    tbl_lst = list()
    for tst_name in tbl_dict:
        row = [tbl_dict[tst_name][u"name"], ]
        for side in (u"ref-data", u"cmp-data"):
            samples = tbl_dict[tst_name][side]
            if samples:
                row.append(round(mean(samples) / 1000000, 2))
                row.append(round(stdev(samples) / 1000000, 2))
            else:
                row.extend([None, None])
        # Delta is computed only when both means exist and the reference
        # mean is non-zero; rows without a delta are dropped by the
        # length check below.
        if row[-4] is not None and row[-2] is not None and row[-4] != 0:
            row.append(int(relative_change(float(row[-4]), float(row[-2]))))
        if len(row) == len(header):
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join(str(item) for item in test) + u"\n")

    convert_csv_to_pretty_txt(
        f"{table[u'output-file']}.csv", f"{table[u'output-file']}.txt"
    )

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1062
1063
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Build the header of the table; a missing specification key aborts
    # the table generation.
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]", u"Stdev of delta [%]"
        ]
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] != u"SOAK":
                    continue
                tst_name_mod = tst_name.replace(u"-soak", u"")
                entry = tbl_dict.get(tst_name_mod, None)
                if entry is None:
                    # Prefix the displayed name with the NIC extracted
                    # from the parent suite name (empty if not found).
                    nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = nic_match.group(0) if nic_match else u""
                    entry = {
                        u"name": (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        ),
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                    tbl_dict[tst_name_mod] = entry
                try:
                    entry[u"cmp-data"].append(
                        tst_data[u"throughput"][u"LOWER"]
                    )
                except (KeyError, TypeError):
                    pass

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = \
                    tst_name.replace(u"-ndrpdr", u"").replace(u"-mrr", u"")
                # Only tests which also have a SOAK counterpart are kept.
                if tst_name_mod not in tbl_dict:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                except (KeyError, TypeError):
                    continue

    # Assemble rows: name, ref mean/stdev, cmp mean/stdev, delta, d_stdev.
    tbl_lst = list()
    for tst_name in tbl_dict:
        row = [tbl_dict[tst_name][u"name"], ]
        stats = dict()
        for side in (u"ref-data", u"cmp-data"):
            samples = tbl_dict[tst_name][side]
            if samples:
                stats[side] = (mean(samples), stdev(samples))
                row.append(round(stats[side][0] / 1000000, 2))
                row.append(round(stats[side][1] / 1000000, 2))
            else:
                stats[side] = (None, None)
                row.extend([None, None])
        ref_mean, ref_stdev = stats[u"ref-data"]
        cmp_mean, cmp_stdev = stats[u"cmp-data"]
        # Rows are emitted only when both means are present and non-zero.
        if ref_mean and cmp_mean:
            delta, d_stdev = relative_change_stdev(
                ref_mean, cmp_mean, ref_stdev, cmp_stdev)
            row.append(round(delta, 2))
            row.append(round(d_stdev, 2))
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join(str(item) for item in test) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1195
1196
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table:
    # tbl_dict: test name --> {"name": <nic>-<test name>,
    #                          "data": build number --> receive rate}
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                # Tests on the "ignore-list" are excluded from the dashboard.
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # NIC is extracted from the parent suite name; tests
                    # without a recognizable NIC are skipped entirely.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to evaluate a trend.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        # Window sizes are clipped to the number of available samples.
        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            # Maximum of the long-term trend averages, NaNs excluded;
            # ValueError means the slice contains no valid value.
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last average vs the average win_size samples ago.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        # Long-term change: last average vs the long-term maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # NOTE(review): the first check is subsumed by the second one
            # (which already skips when either relative change is NaN).
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    # Pre-sort by name, then group rows by regression count (descending),
    # within that by progression count (descending), and within each group
    # order by short-term change.
    tbl_lst.sort(key=lambda rel: rel[0])

    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1316
1317
1318 def _generate_url(testbed, test_name):
1319     """Generate URL to a trending plot from the name of the test case.
1320
1321     :param testbed: The testbed used for testing.
1322     :param test_name: The name of the test case.
1323     :type testbed: str
1324     :type test_name: str
1325     :returns: The URL to the plot with the trending data for the given test
1326         case.
1327     :rtype str
1328     """
1329
1330     if u"x520" in test_name:
1331         nic = u"x520"
1332     elif u"x710" in test_name:
1333         nic = u"x710"
1334     elif u"xl710" in test_name:
1335         nic = u"xl710"
1336     elif u"xxv710" in test_name:
1337         nic = u"xxv710"
1338     elif u"vic1227" in test_name:
1339         nic = u"vic1227"
1340     elif u"vic1385" in test_name:
1341         nic = u"vic1385"
1342     elif u"x553" in test_name:
1343         nic = u"x553"
1344     elif u"cx556" in test_name or u"cx556a" in test_name:
1345         nic = u"cx556a"
1346     else:
1347         nic = u""
1348
1349     if u"64b" in test_name:
1350         frame_size = u"64b"
1351     elif u"78b" in test_name:
1352         frame_size = u"78b"
1353     elif u"imix" in test_name:
1354         frame_size = u"imix"
1355     elif u"9000b" in test_name:
1356         frame_size = u"9000b"
1357     elif u"1518b" in test_name:
1358         frame_size = u"1518b"
1359     elif u"114b" in test_name:
1360         frame_size = u"114b"
1361     else:
1362         frame_size = u""
1363
1364     if u"1t1c" in test_name or \
1365         (u"-1c-" in test_name and
1366          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1367         cores = u"1t1c"
1368     elif u"2t2c" in test_name or \
1369          (u"-2c-" in test_name and
1370           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1371         cores = u"2t2c"
1372     elif u"4t4c" in test_name or \
1373          (u"-4c-" in test_name and
1374           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1375         cores = u"4t4c"
1376     elif u"2t1c" in test_name or \
1377          (u"-1c-" in test_name and
1378           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1379         cores = u"2t1c"
1380     elif u"4t2c" in test_name or \
1381          (u"-2c-" in test_name and
1382           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1383         cores = u"4t2c"
1384     elif u"8t4c" in test_name or \
1385          (u"-4c-" in test_name and
1386           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1387         cores = u"8t4c"
1388     else:
1389         cores = u""
1390
1391     if u"testpmd" in test_name:
1392         driver = u"testpmd"
1393     elif u"l3fwd" in test_name:
1394         driver = u"l3fwd"
1395     elif u"avf" in test_name:
1396         driver = u"avf"
1397     elif u"rdma" in test_name:
1398         driver = u"rdma"
1399     elif u"dnv" in testbed or u"tsh" in testbed:
1400         driver = u"ixgbe"
1401     else:
1402         driver = u"i40e"
1403
1404     if u"acl" in test_name or \
1405             u"macip" in test_name or \
1406             u"nat" in test_name or \
1407             u"policer" in test_name or \
1408             u"cop" in test_name:
1409         bsf = u"features"
1410     elif u"scale" in test_name:
1411         bsf = u"scale"
1412     elif u"base" in test_name:
1413         bsf = u"base"
1414     else:
1415         bsf = u"base"
1416
1417     if u"114b" in test_name and u"vhost" in test_name:
1418         domain = u"vts"
1419     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1420         domain = u"dpdk"
1421     elif u"memif" in test_name:
1422         domain = u"container_memif"
1423     elif u"srv6" in test_name:
1424         domain = u"srv6"
1425     elif u"vhost" in test_name:
1426         domain = u"vhost"
1427         if u"vppl2xc" in test_name:
1428             driver += u"-vpp"
1429         else:
1430             driver += u"-testpmd"
1431         if u"lbvpplacp" in test_name:
1432             bsf += u"-link-bonding"
1433     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1434         domain = u"nf_service_density_vnfc"
1435     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1436         domain = u"nf_service_density_cnfc"
1437     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1438         domain = u"nf_service_density_cnfp"
1439     elif u"ipsec" in test_name:
1440         domain = u"ipsec"
1441         if u"sw" in test_name:
1442             bsf += u"-sw"
1443         elif u"hw" in test_name:
1444             bsf += u"-hw"
1445     elif u"ethip4vxlan" in test_name:
1446         domain = u"ip4_tunnels"
1447     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1448         domain = u"ip4"
1449     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1450         domain = u"ip6"
1451     elif u"l2xcbase" in test_name or \
1452             u"l2xcscale" in test_name or \
1453             u"l2bdbasemaclrn" in test_name or \
1454             u"l2bdscale" in test_name or \
1455             u"l2patch" in test_name:
1456         domain = u"l2"
1457     else:
1458         domain = u""
1459
1460     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1461     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1462
1463     return file_name + anchor_name
1464
1465
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the CSV dashboard produced earlier, renders it as an HTML
    table (raw-html reST block) and links each test name to its
    trending graph.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    testbed = table.get(u"testbed", None)
    if not testbed:
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The HTML table is assembled as an ElementTree:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row - first CSV line:
    hdr_row = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, caption in enumerate(csv_lst[0]):
        hdr_cell = ET.SubElement(
            hdr_row,
            u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        hdr_cell.text = caption

    # Two alternating shades per result class:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }

    # Data rows - the rest of the CSV lines:
    for row_nr, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 carry the regression / progression counters.
        if int(row[4]):
            shades = colors[u"regression"]
        elif int(row[5]):
            shades = colors[u"progression"]
        else:
            shades = colors[u"normal"]
        body_row = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=shades[row_nr % 2])
        )

        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                body_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0:
                # The first column is the test name, linked to its
                # trending graph:
                anchor = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                anchor.text = value
            else:
                cell.text = value

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1566
1567
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each listed build, writes the build number, VPP version,
    pass/fail counts and the names of the failed tests (prefixed with
    the NIC extracted from the test's parent suite) to the output file,
    one item per line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                # A single missing build must not abandon the whole
                # table; skip it and keep the data collected from the
                # other builds. (Previously this returned, so no output
                # file was written at all.)
                logging.error(f"Data for {job}: {build} is not present.")
                continue
            tbl_list.append(build)
            tbl_list.append(version)
            failed_tests = list()
            passed = 0
            failed = 0
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                if not groups:
                    # The NIC cannot be determined from the parent
                    # suite name; the test is not listed.
                    continue
                nic = groups.group(0)
                failed_tests.append(f"{nic}-{tst_data[u'name']}")
            tbl_list.append(str(passed))
            tbl_list.append(str(failed))
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        # Batch the writes instead of one write() call per item.
        file_handler.writelines(f"{test}\n" for test in tbl_list)
1628
1629
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Counts, per test, the number of failures within the configured time
    window and writes a CSV (and pretty txt) table sorted by the number
    of failures and the time of the last failure.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    # Only results generated within the last "window" days (default 7)
    # are taken into account.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        # The NIC cannot be determined from the parent
                        # suite name; the test is not listed.
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    # Fetch the metadata once per (job, build) lookup
                    # instead of twice.
                    metadata = input_data.metadata(job, build)
                    generated = metadata.get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            metadata.get(u"version", u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        # Count the failures and remember the data of the last one.
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    # Sort by number of failures (descending), then by the date of the
    # last failure (descending). The "%Y%m%d %H:%M" date strings compare
    # correctly lexically, so no parsing is needed. This single stable
    # sort replaces the former two passes (sort by date followed by an
    # O(max_fails * len(tbl_lst)) re-grouping loop) and yields the same
    # ordering.
    tbl_lst.sort(key=lambda rel: (rel[1], rel[2]), reverse=True)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1737
1738
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV table of failed tests, renders it as an HTML table
    (raw-html reST block) and links each test name to its trending
    graph.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Root element of the generated HTML table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row - first CSV line:
    header_row = ET.SubElement(
        failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for col, caption in enumerate(csv_lst[0]):
        th_cell = ET.SubElement(
            header_row,
            u"th",
            attrib=dict(align=u"left" if col == 0 else u"center")
        )
        th_cell.text = caption

    # Data rows, shaded in two alternating colors:
    colors = (u"#e9f1fb", u"#d4e4f7")
    for row_nr, row in enumerate(csv_lst[1:]):
        tr_elem = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=colors[row_nr % 2])
        )

        for col, value in enumerate(row):
            td_cell = ET.SubElement(
                tr_elem,
                u"td",
                attrib=dict(align=u"left" if col == 0 else u"center")
            )
            if col:
                td_cell.text = value
            else:
                # The first column is the test name, linked to its
                # trending graph:
                anchor = ET.SubElement(
                    td_cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                anchor.text = value

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return