PAL: Add sortable html table for comparisons
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 import plotly.graph_objects as go
23 import plotly.offline as ploff
24 import pandas as pd
25
26 from string import replace
27 from collections import OrderedDict
28 from numpy import nan, isnan
29 from xml.etree import ElementTree as ET
30 from datetime import datetime as dt
31 from datetime import timedelta
32
33 from utils import mean, stdev, relative_change, classify_anomalies, \
34     convert_csv_to_pretty_txt, relative_change_stdev
35
36
# Matches the NIC code embedded in test and suite names,
# e.g. "10ge2p1x520"; used both to strip the NIC from normalized test
# names and to extract it for displayed names.
REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
38
39
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table specification names its generator function in
    table["algorithm"]; the function is looked up in this module and
    called with (table, data). An unknown algorithm name is logged and
    skipped so the remaining tables are still generated.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        # Look the algorithm up by name instead of eval()-ing the spec
        # string: the specification is data, not code, and eval would
        # execute any expression written there. This also stops a
        # NameError raised *inside* an algorithm from being misreported
        # as a missing algorithm.
        try:
            algorithm = globals()[table["algorithm"]]
        except KeyError as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
            continue
        algorithm(table, data)
    logging.info("Done.")
57
58
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    Writes one CSV file per suite, containing the configured columns for
    every test whose parent belongs to that suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        # CSV-escape embedded double quotes and wrap each title in quotes.
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification
    # Only the first job and its first build are used (Python 2:
    # dict.keys() returns a list, so indexing works).
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    # One output file is written per suite.
    for suite_longname, suite in suites.iteritems():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            # A test belongs to this suite when its "parent" string occurs
            # inside the suite name.
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # column["data"] is expected to look like
                        # "data <field-name>"; the second token selects the
                        # field of the test entry.
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("conf-history",
                                                            "show-run"):
                            # Drop only the first " |br| " marker
                            # (string.replace from the Python 2 string
                            # module) ...
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            # ... and wrap the rest as preformatted text; the
                            # last 5 characters (presumably a trailing
                            # line-break marker) are cut off - TODO confirm.
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
125
126
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Like table_details, but the data from all jobs/builds is merged into a
    single flat mapping first, and missing values are rendered as
    "Not Captured" instead of "No data".

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)
    # Merge results from all jobs/builds into one mapping keyed by test ID.
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(
        table, continue_on_error=True, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        # CSV-escape embedded double quotes and wrap each title in quotes.
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # One output file is written per suite.
    for _, suite in suites.iteritems():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            # A test belongs to this suite when its "parent" string occurs
            # inside the suite name.
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # column["data"] is expected to look like
                        # "data <field-name>"; the second token selects the
                        # field of the test entry.
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        # string.replace (Python 2 string module); the padded
                        # "Not Captured     " presumably keeps column widths
                        # stable in the rendered output - TODO confirm.
                        col_data = replace(col_data, "No Data",
                                           "Not Captured     ")
                        if column["data"].split(" ")[1] in ("conf-history",
                                                            "show-run"):
                            # Drop only the first " |br| " marker ...
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            # ... and wrap the rest as preformatted text; the
                            # last 5 characters (presumably a trailing
                            # line-break marker) are cut off - TODO confirm.
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append('"Not captured"')
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
193
194
def _tpc_modify_test_name(test_name):
    """Normalize a test name so equivalent tests map to a single key.

    Strips the test-type suffixes (-ndrpdrdisc, -ndrpdr, -pdrdisc,
    -ndrdisc, -pdr, -ndr), collapses thread/core tags such as "2t1c" to
    core-only tags ("1c") and removes the NIC code.

    :param test_name: Test name to normalize.
    :type test_name: str
    :returns: Normalized test name.
    :rtype: str
    """
    # Order matters: longer suffixes must go before their shorter
    # prefixes (e.g. "-ndrpdrdisc" before "-ndrpdr" before "-ndr").
    substitutions = (
        ("-ndrpdrdisc", ""), ("-ndrpdr", ""), ("-pdrdisc", ""),
        ("-ndrdisc", ""), ("-pdr", ""), ("-ndr", ""),
        ("1t1c", "1c"), ("2t1c", "1c"),
        ("2t2c", "2c"), ("4t2c", "2c"),
        ("4t4c", "4c"), ("8t4c", "4c"),
    )
    modified = test_name
    for old, new in substitutions:
        modified = modified.replace(old, new)
    return re.sub(REGEX_NIC, "", modified)
205
206
207 def _tpc_modify_displayed_test_name(test_name):
208     return test_name.replace("1t1c", "1c").replace("2t1c", "1c"). \
209         replace("2t2c", "2c").replace("4t2c", "2c"). \
210         replace("4t4c", "4c").replace("8t4c", "4c")
211
212
213 def _tpc_insert_data(target, src, include_tests):
214     try:
215         if include_tests == "MRR":
216             target.append(src["result"]["receive-rate"].avg)
217         elif include_tests == "PDR":
218             target.append(src["throughput"]["PDR"]["LOWER"])
219         elif include_tests == "NDR":
220             target.append(src["throughput"]["NDR"]["LOWER"])
221     except (KeyError, TypeError):
222         pass
223
224
225 def _tpc_sort_table(table):
226     # Sort the table:
227     # 1. New in CSIT-XXXX
228     # 2. See footnote
229     # 3. Delta
230     tbl_new = list()
231     tbl_see = list()
232     tbl_delta = list()
233     for item in table:
234         if isinstance(item[-1], str):
235             if "New in CSIT" in item[-1]:
236                 tbl_new.append(item)
237             elif "See footnote" in item[-1]:
238                 tbl_see.append(item)
239         else:
240             tbl_delta.append(item)
241
242     # Sort the tables:
243     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
244     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
245     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
246     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
247
248     # Put the tables together:
249     table = list()
250     table.extend(tbl_new)
251     table.extend(tbl_see)
252     table.extend(tbl_delta)
253
254     return table
255
256
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    df = pd.DataFrame(data, columns=header)

    # Pre-compute one sorted view per (column, direction): all columns
    # ascending first, then all columns descending. Each view becomes its
    # own table trace; the dropdown only toggles trace visibility.
    # Ties are broken by the first (test name) column. NOTE(review): for
    # the first column itself the ascending flags are inverted
    # ([False, True]), so its "(ascending)" menu entry sorts descending -
    # confirm this is intended.
    df_sorted = [df.sort_values(
        by=[key, header[0]], ascending=[True, True]
        if key != header[0] else [False, True]) for key in header]
    df_sorted_rev = [df.sort_values(
        by=[key, header[0]], ascending=[False, True]
        if key != header[0] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Alternating row background colours (zebra striping).
    fill_color = [["#d4e4f7" if idx % 2 else "#e9f1fb"
                   for idx in range(len(df))]]
    table_header = dict(
        values=["<b>{item}</b>".format(item=item) for item in header],
        fill_color="#7eade7",
        align=["left", "center"]
    )

    fig = go.Figure()

    # One go.Table trace per pre-sorted view.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
        fig.add_trace(
            go.Table(
                columnwidth=[30, 10],
                header=table_header,
                cells=dict(
                    values=columns,
                    fill_color=fill_color,
                    align=["left", "right"]
                )
            )
        )

    # Build the dropdown: one button per (column, direction); selecting a
    # button makes exactly the matching trace visible.
    buttons = list()
    menu_items = ["<b>{0}</b> (ascending)".format(itm) for itm in header]
    menu_items_rev = ["<b>{0}</b> (descending)".format(itm) for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
        visible[idx] = True
        buttons.append(
            dict(
                label=hdr.replace(" [Mpps]", ""),
                method="update",
                args=[{"visible": visible}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type="dropdown",
                direction="down",
                x=0.03,
                xanchor="left",
                y=1.045,
                yanchor="top",
                # Pre-select the last menu entry. NOTE(review): this only
                # highlights the entry; its visibility args are not applied,
                # and traces are created visible by default, so all tables
                # may be rendered until the user picks an entry - confirm.
                active=len(menu_items) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text="<b>Sort by:</b>",
                x=0,
                xref="paper",
                y=1.035,
                yref="paper",
                align="left",
                showarrow=False
            )
        ]
    )

    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
348
349
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Compares per-test mean and stdev of throughput (receive rate for MRR)
    between a "reference" and a "compare" data set, optionally prefixed by
    older "history" data sets, and writes the result as csv, pretty txt
    and a sortable html table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Rec Rate"
        else:
            hdr_param = "Thput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    # tbl_dict maps a normalized test name to its displayed name and the
    # collected "ref-data" / "cmp-data" samples (plus optional "history").
    tbl_dict = dict()
    topo = ""
    # Collect the reference results.
    for job, builds in table["reference"]["data"].items():
        # Remember the topology; used below to attach a footnote to dot1q
        # tests on 2n-skx testbeds.
        topo = "2n-skx" if "2n-skx" in job else ""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name: NIC code extracted from the parent
                    # suite plus the test name without its last segment.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                _tpc_insert_data(target=tbl_dict[tst_name_mod]["ref-data"],
                                 src=tst_data,
                                 include_tests=table["include-tests"])

    # Collect the compare results (same keying/naming as above).
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
                                 src=tst_data,
                                 include_tests=table["include-tests"])

    # Optional "data-replacement": results from replacement jobs/builds
    # are added to (and partly override) the compare data collected above.
    replacement = table["compare"].get("data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].iteritems():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if "across topologies" in table["title"].lower():
                        tst_name_mod = tst_name_mod.replace("2n1l-", "")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = "{0}".format("-".join(tst_data["name"].
                                                     split("-")[:-1]))
                        if "across testbeds" in table["title"].lower() or \
                                "across topologies" in table["title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {"name": name,
                                                  "ref-data": list(),
                                                  "cmp-data": list()}
                    # NOTE(review): create_new_list is cleared after the very
                    # first replacement test, so only that one test has its
                    # previously collected cmp-data dropped; a per-test reset
                    # may have been intended - confirm.
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod]["cmp-data"] = list()

                    _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
                                     src=tst_data,
                                     include_tests=table["include-tests"])

    # Collect results of the older data sets listed in "history". Only
    # tests already present in tbl_dict are extended.
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        tst_name_mod = _tpc_modify_test_name(tst_name)
                        if "across topologies" in table["title"].lower():
                            tst_name_mod = tst_name_mod.replace("2n1l-", "")
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].\
                                get(item["title"], None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][item[
                                    "title"]].append(tst_data["result"][
                                        "receive-rate"].avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                            "PDR"]["LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                            "NDR"]["LOWER"])
                            else:
                                continue
                        except (TypeError, KeyError):
                            pass

    # Build the table rows. A complete row is:
    # [name, (hist mean, hist stdev)..., ref mean, ref stdev,
    #  cmp mean, cmp stdev, delta-or-remark]
    # Means/stdevs are divided by 1e6 (the header labels them [Mpps]).
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend(["Not tested", "Not tested"])
            else:
                item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend(["Not tested", "Not tested"])
        # Here item[-4] is the reference mean and item[-2] the compare mean.
        if item[-2] == "Not tested":
            pass
        elif item[-4] == "Not tested":
            item.append("New in CSIT-1908")
        elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
            item.append("See footnote [1]")
            footnote = True
        elif item[-4] != 0:
            # Delta truncated to a whole percent.
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only complete rows with a tested compare column; rows whose
        # reference mean is 0 get no last cell appended and fail the length
        # check, so they are filtered out here.
        if (len(item) == len(header)) and (item[-3] != "Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    txt_file_name = "{0}.txt".format(table["output-file"])
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # Append the dot1q methodology footnote only when a row references it.
    if footnote:
        with open(txt_file_name, 'a') as txt_file:
            txt_file.writelines([
                "\nFootnotes:\n",
                "[1] CSIT-1908 changed test methodology of dot1q tests in "
                "2-node testbeds, dot1q encapsulation is now used on both "
                "links of SUT.\n",
                "    Previously dot1q was used only on a single link with the "
                "other link carrying untagged Ethernet frames. This changes "
                "results\n",
                "    in slightly lower throughput in CSIT-1908 for these "
                "tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst,
                             "{0}.html".format(table["output-file"]))
581
582
583 def table_performance_comparison_nic(table, input_data):
584     """Generate the table(s) with algorithm: table_performance_comparison
585     specified in the specification file.
586
587     :param table: Table to generate.
588     :param input_data: Data to process.
589     :type table: pandas.Series
590     :type input_data: InputData
591     """
592
593     logging.info("  Generating the table {0} ...".
594                  format(table.get("title", "")))
595
596     # Transform the data
597     logging.info("    Creating the data set for the {0} '{1}'.".
598                  format(table.get("type", ""), table.get("title", "")))
599     data = input_data.filter_data(table, continue_on_error=True)
600
601     # Prepare the header of the tables
602     try:
603         header = ["Test case", ]
604
605         if table["include-tests"] == "MRR":
606             hdr_param = "Rec Rate"
607         else:
608             hdr_param = "Thput"
609
610         history = table.get("history", None)
611         if history:
612             for item in history:
613                 header.extend(
614                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
615                      "{0} Stdev [Mpps]".format(item["title"])])
616         header.extend(
617             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
618              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
619              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
620              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
621              "Delta [%]"])
622         header_str = ",".join(header) + "\n"
623     except (AttributeError, KeyError) as err:
624         logging.error("The model is invalid, missing parameter: {0}".
625                       format(err))
626         return
627
628     # Prepare data to the table:
629     tbl_dict = dict()
630     topo = ""
631     for job, builds in table["reference"]["data"].items():
632         topo = "2n-skx" if "2n-skx" in job else ""
633         for build in builds:
634             for tst_name, tst_data in data[job][str(build)].iteritems():
635                 if table["reference"]["nic"] not in tst_data["tags"]:
636                     continue
637                 tst_name_mod = _tpc_modify_test_name(tst_name)
638                 if "across topologies" in table["title"].lower():
639                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
640                 if tbl_dict.get(tst_name_mod, None) is None:
641                     name = "{0}".format("-".join(tst_data["name"].
642                                                  split("-")[:-1]))
643                     if "across testbeds" in table["title"].lower() or \
644                             "across topologies" in table["title"].lower():
645                         name = _tpc_modify_displayed_test_name(name)
646                     tbl_dict[tst_name_mod] = {"name": name,
647                                               "ref-data": list(),
648                                               "cmp-data": list()}
649                 _tpc_insert_data(target=tbl_dict[tst_name_mod]["ref-data"],
650                                  src=tst_data,
651                                  include_tests=table["include-tests"])
652
653     for job, builds in table["compare"]["data"].items():
654         for build in builds:
655             for tst_name, tst_data in data[job][str(build)].iteritems():
656                 if table["compare"]["nic"] not in tst_data["tags"]:
657                     continue
658                 tst_name_mod = _tpc_modify_test_name(tst_name)
659                 if "across topologies" in table["title"].lower():
660                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
661                 if tbl_dict.get(tst_name_mod, None) is None:
662                     name = "{0}".format("-".join(tst_data["name"].
663                                                  split("-")[:-1]))
664                     if "across testbeds" in table["title"].lower() or \
665                             "across topologies" in table["title"].lower():
666                         name = _tpc_modify_displayed_test_name(name)
667                     tbl_dict[tst_name_mod] = {"name": name,
668                                               "ref-data": list(),
669                                               "cmp-data": list()}
670                 _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
671                                  src=tst_data,
672                                  include_tests=table["include-tests"])
673
674     replacement = table["compare"].get("data-replacement", None)
675     if replacement:
676         create_new_list = True
677         rpl_data = input_data.filter_data(
678             table, data=replacement, continue_on_error=True)
679         for job, builds in replacement.items():
680             for build in builds:
681                 for tst_name, tst_data in rpl_data[job][str(build)].iteritems():
682                     if table["compare"]["nic"] not in tst_data["tags"]:
683                         continue
684                     tst_name_mod = _tpc_modify_test_name(tst_name)
685                     if "across topologies" in table["title"].lower():
686                         tst_name_mod = tst_name_mod.replace("2n1l-", "")
687                     if tbl_dict.get(tst_name_mod, None) is None:
688                         name = "{0}".format("-".join(tst_data["name"].
689                                                      split("-")[:-1]))
690                         if "across testbeds" in table["title"].lower() or \
691                                 "across topologies" in table["title"].lower():
692                             name = _tpc_modify_displayed_test_name(name)
693                         tbl_dict[tst_name_mod] = {"name": name,
694                                                   "ref-data": list(),
695                                                   "cmp-data": list()}
696                     if create_new_list:
697                         create_new_list = False
698                         tbl_dict[tst_name_mod]["cmp-data"] = list()
699
700                     _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
701                                      src=tst_data,
702                                      include_tests=table["include-tests"])
703
704     if history:
705         for item in history:
706             for job, builds in item["data"].items():
707                 for build in builds:
708                     for tst_name, tst_data in data[job][str(build)].iteritems():
709                         if item["nic"] not in tst_data["tags"]:
710                             continue
711                         tst_name_mod = _tpc_modify_test_name(tst_name)
712                         if "across topologies" in table["title"].lower():
713                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
714                         if tbl_dict.get(tst_name_mod, None) is None:
715                             continue
716                         if tbl_dict[tst_name_mod].get("history", None) is None:
717                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
718                         if tbl_dict[tst_name_mod]["history"].\
719                                 get(item["title"], None) is None:
720                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
721                                 list()
722                         try:
723                             # TODO: Re-work when NDRPDRDISC tests are not used
724                             if table["include-tests"] == "MRR":
725                                 tbl_dict[tst_name_mod]["history"][item[
726                                     "title"]].append(tst_data["result"][
727                                         "receive-rate"].avg)
728                             elif table["include-tests"] == "PDR":
729                                 if tst_data["type"] == "PDR":
730                                     tbl_dict[tst_name_mod]["history"][
731                                         item["title"]].\
732                                         append(tst_data["throughput"]["value"])
733                                 elif tst_data["type"] == "NDRPDR":
734                                     tbl_dict[tst_name_mod]["history"][item[
735                                         "title"]].append(tst_data["throughput"][
736                                             "PDR"]["LOWER"])
737                             elif table["include-tests"] == "NDR":
738                                 if tst_data["type"] == "NDR":
739                                     tbl_dict[tst_name_mod]["history"][
740                                         item["title"]].\
741                                         append(tst_data["throughput"]["value"])
742                                 elif tst_data["type"] == "NDRPDR":
743                                     tbl_dict[tst_name_mod]["history"][item[
744                                         "title"]].append(tst_data["throughput"][
745                                             "NDR"]["LOWER"])
746                             else:
747                                 continue
748                         except (TypeError, KeyError):
749                             pass
750
751     tbl_lst = list()
752     footnote = False
753     for tst_name in tbl_dict.keys():
754         item = [tbl_dict[tst_name]["name"], ]
755         if history:
756             if tbl_dict[tst_name].get("history", None) is not None:
757                 for hist_data in tbl_dict[tst_name]["history"].values():
758                     if hist_data:
759                         item.append(round(mean(hist_data) / 1000000, 2))
760                         item.append(round(stdev(hist_data) / 1000000, 2))
761                     else:
762                         item.extend(["Not tested", "Not tested"])
763             else:
764                 item.extend(["Not tested", "Not tested"])
765         data_t = tbl_dict[tst_name]["ref-data"]
766         if data_t:
767             item.append(round(mean(data_t) / 1000000, 2))
768             item.append(round(stdev(data_t) / 1000000, 2))
769         else:
770             item.extend(["Not tested", "Not tested"])
771         data_t = tbl_dict[tst_name]["cmp-data"]
772         if data_t:
773             item.append(round(mean(data_t) / 1000000, 2))
774             item.append(round(stdev(data_t) / 1000000, 2))
775         else:
776             item.extend(["Not tested", "Not tested"])
777         if item[-2] == "Not tested":
778             pass
779         elif item[-4] == "Not tested":
780             item.append("New in CSIT-1908")
781         elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
782             item.append("See footnote [1]")
783             footnote = True
784         elif item[-4] != 0:
785             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
786         if (len(item) == len(header)) and (item[-3] != "Not tested"):
787             tbl_lst.append(item)
788
789     tbl_lst = _tpc_sort_table(tbl_lst)
790
791     # Generate csv tables:
792     csv_file = "{0}.csv".format(table["output-file"])
793     with open(csv_file, "w") as file_handler:
794         file_handler.write(header_str)
795         for test in tbl_lst:
796             file_handler.write(",".join([str(item) for item in test]) + "\n")
797
798     txt_file_name = "{0}.txt".format(table["output-file"])
799     convert_csv_to_pretty_txt(csv_file, txt_file_name)
800
801     if footnote:
802         with open(txt_file_name, 'a') as txt_file:
803             txt_file.writelines([
804                 "\nFootnotes:\n",
805                 "[1] CSIT-1908 changed test methodology of dot1q tests in "
806                 "2-node testbeds, dot1q encapsulation is now used on both "
807                 "links of SUT.\n",
808                 "    Previously dot1q was used only on a single link with the "
809                 "other link carrying untagged Ethernet frames. This changes "
810                 "results\n",
811                 "    in slightly lower throughput in CSIT-1908 for these "
812                 "tests. See release notes."
813             ])
814
815     # Generate html table:
816     _tpc_generate_html_table(header, tbl_lst,
817                              "{0}.html".format(table["output-file"]))
818
819
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Compares the results of the same tests run on two different NICs and
    writes the comparison as csv, pretty txt and html tables.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        # MRR tests report a receive rate, the other test types a throughput.
        if table["include-tests"] == "MRR":
            hdr_param = "Rec Rate"
        else:
            hdr_param = "Thput"

        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Normalize the test name: strip the test-type suffix and
                # reduce the thread/core combination to the core count so
                # that runs of the same test on different NICs share a key.
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                # Drop the NIC part of the name - both NICs must map to the
                # same key.
                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = "-".join(tst_data["name"].split("-")[:-1])
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    if table["include-tests"] == "MRR":
                        result = tst_data["result"]["receive-rate"].avg
                    elif table["include-tests"] == "PDR":
                        result = tst_data["throughput"]["PDR"]["LOWER"]
                    elif table["include-tests"] == "NDR":
                        result = tst_data["throughput"]["NDR"]["LOWER"]
                    else:
                        result = None

                    # Sort the result to the reference or the compare column
                    # according to the NIC tag of the test.
                    if result:
                        if table["reference"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["ref-data"].append(result)
                        elif table["compare"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug("No data for {0}".format(tst_name))
                    logging.debug(repr(err))
                    # No data in output.xml for this test

    # Build the rows: name, ref mean/stdev, cmp mean/stdev, delta. The delta
    # is appended only when both columns have data; rows shorter than the
    # header (i.e. without a delta) are dropped by the length check below.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst,
                             "{0}.html".format(table["output-file"]))
932
933
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Compares the throughput of SOAK tests against the corresponding NDR
    (reference) tests and writes the comparison as csv, pretty txt and html
    tables.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            "Test case",
            "{0} Thput [Mpps]".format(table["reference"]["title"]),
            "{0} Stdev [Mpps]".format(table["reference"]["title"]),
            "{0} Thput [Mpps]".format(table["compare"]["title"]),
            "{0} Stdev [Mpps]".format(table["compare"]["title"]),
            "Delta [%]", "Stdev of delta [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_data["type"] == "SOAK":
                    # Strip the "-soak" suffix so the name can be matched
                    # against the corresponding NDR test below.
                    tst_name_mod = tst_name.replace("-soak", "")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data["parent"])
                        nic = groups.group(0) if groups else ""
                        name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                              split("-")[:-1]))
                        tbl_dict[tst_name_mod] = {
                            "name": name,
                            "ref-data": list(),
                            "cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod]["cmp-data"].append(
                            tst_data["throughput"]["LOWER"])
                    except (KeyError, TypeError):
                        # No throughput data for this SOAK test.
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdr", "").\
                    replace("-mrr", "")
                # Only tests which also have a SOAK result are of interest.
                if tst_name_mod in tests_lst:
                    try:
                        if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
                            if table["include-tests"] == "MRR":
                                result = tst_data["result"]["receive-rate"].avg
                            elif table["include-tests"] == "PDR":
                                result = tst_data["throughput"]["PDR"]["LOWER"]
                            elif table["include-tests"] == "NDR":
                                result = tst_data["throughput"]["NDR"]["LOWER"]
                            else:
                                result = None
                            if result is not None:
                                tbl_dict[tst_name_mod]["ref-data"].append(
                                    result)
                    except (KeyError, TypeError):
                        continue

    # Build the rows: name, ref mean/stdev, cmp mean/stdev, delta and the
    # stdev of the delta. Only rows with data in both columns are kept.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        data_r = tbl_dict[tst_name]["ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name]["cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst,
                             "{0}.html".format(table["output-file"]))
1059
1060
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    For every test with at least two samples, classifies the trend, computes
    the short-term and long-term relative changes of the trend value and the
    numbers of regressions/progressions inside the evaluation window, then
    writes the result as a csv file and its pretty txt rendering.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table: map each test name to its display name
    # (prefixed with the NIC) and to its per-build receive rates.
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        if len(data_t) < 2:
            # At least two samples are needed to evaluate a trend.
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # The long window holds no valid (non-NaN) samples.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last trend value vs the value one window ago.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        # Long-term change: last trend value vs the long-window maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip rows with incomplete trend data. This single condition
            # also covers the case of both relative changes being NaN, so
            # no separate check for that is needed.
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Sort the rows: most regressions first, then most progressions,
    # then by the short-term change.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1178
1179
1180 def _generate_url(base, testbed, test_name):
1181     """Generate URL to a trending plot from the name of the test case.
1182
1183     :param base: The base part of URL common to all test cases.
1184     :param testbed: The testbed used for testing.
1185     :param test_name: The name of the test case.
1186     :type base: str
1187     :type testbed: str
1188     :type test_name: str
1189     :returns: The URL to the plot with the trending data for the given test
1190         case.
1191     :rtype str
1192     """
1193
1194     url = base
1195     file_name = ""
1196     anchor = ".html#"
1197     feature = ""
1198
1199     if "lbdpdk" in test_name or "lbvpp" in test_name:
1200         file_name = "link_bonding"
1201
1202     elif "114b" in test_name and "vhost" in test_name:
1203         file_name = "vts"
1204
1205     elif "testpmd" in test_name or "l3fwd" in test_name:
1206         file_name = "dpdk"
1207
1208     elif "memif" in test_name:
1209         file_name = "container_memif"
1210         feature = "-base"
1211
1212     elif "srv6" in test_name:
1213         file_name = "srv6"
1214
1215     elif "vhost" in test_name:
1216         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
1217             file_name = "vm_vhost_l2"
1218             if "114b" in test_name:
1219                 feature = ""
1220             elif "l2xcbase" in test_name and "x520" in test_name:
1221                 feature = "-base-l2xc"
1222             elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
1223                 feature = "-base-l2bd"
1224             else:
1225                 feature = "-base"
1226         elif "ip4base" in test_name:
1227             file_name = "vm_vhost_ip4"
1228             feature = "-base"
1229
1230     elif "ipsecbasetnlsw" in test_name:
1231         file_name = "ipsecsw"
1232         feature = "-base-scale"
1233
1234     elif "ipsec" in test_name:
1235         file_name = "ipsec"
1236         feature = "-base-scale"
1237         if "hw-" in test_name:
1238             file_name = "ipsechw"
1239         elif "sw-" in test_name:
1240             file_name = "ipsecsw"
1241         if "-int-" in test_name:
1242             feature = "-base-scale-int"
1243         elif "tnl" in test_name:
1244             feature = "-base-scale-tnl"
1245
1246     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
1247         file_name = "ip4_tunnels"
1248         feature = "-base"
1249
1250     elif "ip4base" in test_name or "ip4scale" in test_name:
1251         file_name = "ip4"
1252         if "xl710" in test_name:
1253             feature = "-base-scale-features"
1254         elif "iacl" in test_name:
1255             feature = "-features-iacl"
1256         elif "oacl" in test_name:
1257             feature = "-features-oacl"
1258         elif "snat" in test_name or "cop" in test_name:
1259             feature = "-features"
1260         else:
1261             feature = "-base-scale"
1262
1263     elif "ip6base" in test_name or "ip6scale" in test_name:
1264         file_name = "ip6"
1265         feature = "-base-scale"
1266
1267     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
1268             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
1269             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
1270         file_name = "l2"
1271         if "macip" in test_name:
1272             feature = "-features-macip"
1273         elif "iacl" in test_name:
1274             feature = "-features-iacl"
1275         elif "oacl" in test_name:
1276             feature = "-features-oacl"
1277         else:
1278             feature = "-base-scale"
1279
1280     if "x520" in test_name:
1281         nic = "x520-"
1282     elif "x710" in test_name:
1283         nic = "x710-"
1284     elif "xl710" in test_name:
1285         nic = "xl710-"
1286     elif "xxv710" in test_name:
1287         nic = "xxv710-"
1288     elif "vic1227" in test_name:
1289         nic = "vic1227-"
1290     elif "vic1385" in test_name:
1291         nic = "vic1385-"
1292     elif "x553" in test_name:
1293         nic = "x553-"
1294     else:
1295         nic = ""
1296     anchor += nic
1297
1298     if "64b" in test_name:
1299         framesize = "64b"
1300     elif "78b" in test_name:
1301         framesize = "78b"
1302     elif "imix" in test_name:
1303         framesize = "imix"
1304     elif "9000b" in test_name:
1305         framesize = "9000b"
1306     elif "1518b" in test_name:
1307         framesize = "1518b"
1308     elif "114b" in test_name:
1309         framesize = "114b"
1310     else:
1311         framesize = ""
1312     anchor += framesize + '-'
1313
1314     if "1t1c" in test_name:
1315         anchor += "1t1c"
1316     elif "2t2c" in test_name:
1317         anchor += "2t2c"
1318     elif "4t4c" in test_name:
1319         anchor += "4t4c"
1320     elif "2t1c" in test_name:
1321         anchor += "2t1c"
1322     elif "4t2c" in test_name:
1323         anchor += "4t2c"
1324     elif "8t4c" in test_name:
1325         anchor += "8t4c"
1326
1327     return url + file_name + '-' + testbed + '-' + nic + framesize + \
1328         feature.replace("-int", "").replace("-tnl", "") + anchor + feature
1329
1330
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the csv table from table["input-file"] and renders it as an html
    table with color-coded rows and with the test names linked to the
    corresponding trending plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header (the first row of the csv file):
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows: red shades for regressions, green for progressions, blue for
    # normal rows; the two tones of each color alternate odd/even rows.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        # Column 4 holds the number of regressions, column 5 the number of
        # progressions (see the dashboard csv header).
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        else:
            color = "normal"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name: render the test name as a link to its trending plot.
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
1408
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each build writes its number, the tested version, the pass/fail
    counts and the names of the failed tests (prefixed with the NIC), one
    item per line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        # logging.warning, not the deprecated logging.warn alias, for
        # consistency with the rest of this module.
        logging.warning("    No data for the {0} '{1}'.".
                        format(table.get("type", ""), table.get("title", "")))
        return

    tbl_list = list()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get("version", "")
            except KeyError:
                logging.error("Data for {job}: {build} is not present.".
                              format(job=job, build=build))
                return
            tbl_list.append(build)
            tbl_list.append(version)
            failed_tests = list()
            passed = 0
            failed = 0
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_data["status"] != "FAIL":
                    passed += 1
                    continue
                failed += 1
                groups = re.search(REGEX_NIC, tst_data["parent"])
                if not groups:
                    # The NIC cannot be determined, skip this failed test.
                    continue
                nic = groups.group(0)
                failed_tests.append("{0}-{1}".format(nic, tst_data["name"]))
            tbl_list.append(str(passed))
            tbl_list.append(str(failed))
            tbl_list.extend(failed_tests)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        for test in tbl_list:
            file_handler.write(test + '\n')
1466
1467
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    For each test which failed at least once within the configured time
    window ("window" in the specification, default 7 days) the table lists
    the number of failures and the date, VPP build and CSIT job build of the
    most recent failure. Rows are ordered by the number of failures
    (descending) and, within the same failure count, by the date of the last
    failure (newest first).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Guard against an empty filter result (consistent with
    # table_last_failed_tests); without this the loops below would raise.
    if data is None or data.empty:
        logging.warning("    No data for the {0} '{1}'.".
                        format(table.get("type", ""), table.get("title", "")))
        return

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification

    now = dt.utcnow()
    # Only builds generated within this many days are taken into account.
    timeperiod = timedelta(int(table.get("window", 7)))

    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        # The NIC cannot be determined; skip the test.
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    generated = input_data.metadata(job, build).\
                        get("generated", "")
                    if not generated:
                        continue
                    then = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name]["data"][build] = (
                            tst_data["status"],
                            generated,
                            input_data.metadata(job, build).get("version", ""),
                            build)
                except (TypeError, KeyError) as err:
                    logging.warning("tst_name: {} - err: {}".
                                    format(tst_name, repr(err)))

    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = ""
        fails_last_vpp = ""
        fails_last_csit = ""
        # Builds are kept in insertion order (OrderedDict), so the last FAIL
        # seen while iterating is the most recent one.
        for val in tst_data["data"].values():
            if val[0] == "FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    # Two stable sorts give the required ordering (failure count desc, then
    # last-failure date desc within equal counts) in O(n log n) instead of
    # re-scanning the list once per possible failure count.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_lst.sort(key=lambda rel: rel[1], reverse=True)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1567
1568
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV file produced by table_failed_tests and renders it as an
    HTML table wrapped in a reST "raw html" directive. The first column of
    every data row becomes a hyperlink to the test's trending graph.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the pre-generated CSV file which serves as the data source:
    try:
        with open(table["input-file"], 'rb') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Root element of the generated HTML table:
    tbl = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row, built from the first CSV row:
    hdr_row = ET.SubElement(tbl, "tr", attrib=dict(bgcolor="#7eade7"))
    for col, label in enumerate(rows[0]):
        cell = ET.SubElement(
            hdr_row, "th",
            attrib=dict(align="left" if col == 0 else "center"))
        cell.text = label

    # Data rows with alternating background colours:
    colors = ("#e9f1fb", "#d4e4f7")
    for idx, row in enumerate(rows[1:]):
        data_row = ET.SubElement(
            tbl, "tr", attrib=dict(bgcolor=colors[idx % 2]))
        for col, value in enumerate(row):
            cell = ET.SubElement(
                data_row, "td",
                attrib=dict(align="left" if col == 0 else "center"))
            if col == 0:
                # The first column holds the test name; link it to the
                # corresponding trending graph.
                link = ET.SubElement(
                    cell, "a",
                    attrib=dict(href=_generate_url("../trending/",
                                                   testbed, value)))
                link.text = value
            else:
                cell.text = value

    # Serialize the table into the output file:
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(tbl))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return