Report: Add DNV data
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
28
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30     convert_csv_to_pretty_txt, relative_change_stdev
31
32
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
34
35
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table specification names the generator algorithm to run; the
    algorithm is resolved by name and invoked with the table spec and the
    input data. Unknown algorithm names are logged and skipped.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        alg_name = table["algorithm"]
        try:
            # Resolve the generator function by its name from the spec.
            table_generator = eval(alg_name)
            table_generator(table, data)
        except NameError as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=alg_name, err=repr(err)))
    logging.info("Done.")
53
54
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    One CSV file is written per suite; each cell is double-quoted with
    embedded double quotes doubled (CSV escaping).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables: quoted column titles, embedded
    # quotes doubled so the header row is valid CSV.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification. Only the first job and its first build are used.
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.iteritems():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("conf-history",
                                                            "show-run"):
                            # Drop the leading line-break marker and wrap the
                            # text as preformatted; the slice strips the
                            # trailing marker characters.
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        # Quote the placeholder so the row stays valid CSV;
                        # all other cells (and table_merged_details'
                        # fallback) are quoted.
                        row_lst.append('"No data"')
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
122
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Data from all builds is merged first; one CSV file is then written per
    merged suite, with every cell double-quoted (CSV escaping).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data: filter, then merge across builds.
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.merge_data(input_data.filter_data(table))
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.merge_data(
        input_data.filter_data(table, data_set="suites"))

    # CSV header: quoted column titles with embedded quotes doubled.
    header = ['"{0}"'.format(str(col["title"]).replace('"', '""'))
              for col in table["columns"]]

    for _, suite in suites.iteritems():
        suite_name = suite["name"]
        rows = list()
        for tst_name in data.keys():
            if data[tst_name]["parent"] not in suite_name:
                continue
            row = list()
            for column in table["columns"]:
                try:
                    data_key = column["data"].split(" ")[1]
                    cell = str(data[tst_name][data_key]).replace('"', '""')
                    cell = cell.replace("No Data", "Not Captured     ")
                    if data_key in ("conf-history", "show-run"):
                        # Drop the first line-break marker and present the
                        # text as a preformatted block.
                        cell = cell.replace(" |br| ", "", 1)
                        cell = " |prein| {0} |preout| ".format(cell[:-5])
                    row.append('"{0}"'.format(cell))
                except KeyError:
                    row.append('"Not captured"')
            rows.append(row)

        # Write the data to file; suites with no matching tests are skipped.
        if not rows:
            continue
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info("      Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for row in rows:
                file_handler.write(",".join(row) + "\n")

    logging.info("  Done.")
188
189
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Compares mean throughput (receive rate for MRR) of "reference" builds
    against "compare" builds, optionally preceded by extra "history"
    columns, and writes the result as a CSV and a pretty TXT table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables: optional history column pairs,
    # then reference pair, compare pair, and the relative delta.
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Rec Rate"
        else:
            hdr_param = "Thput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    # tbl_dict maps a normalized test name to its display name plus the
    # collected reference / compare (and optionally history) samples.
    tbl_dict = dict()
    # First pass: collect "reference" samples.
    for job, builds in table["reference"]["data"].items():
        # NOTE(review): topo keeps the value from the last iterated job and
        # stays unbound if "reference" data is empty — confirm specs always
        # provide at least one reference job.
        topo = "2n-skx" if "2n-skx" in job else ""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Normalize the test name: strip the test-type suffix and
                # collapse thread/core tags (e.g. 2t1c) to core counts (1c).
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Display name: NIC model (from the parent suite name)
                    # plus the test name without its last dash component.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["ref-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except TypeError:
                    pass  # No data in output.xml for this test

    # Second pass: collect "compare" samples (same normalization as above).
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                    replace("-ndrdisc", "").replace("-pdr", ""). \
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["cmp-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except (KeyError, TypeError):
                    pass
    # Third pass: optional history samples; tests not already present in
    # tbl_dict (i.e. absent from reference and compare) are skipped.
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                            replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                            replace("-ndrdisc", "").replace("-pdr", ""). \
                            replace("-ndr", "").\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                        if "across topologies" in table["title"].lower():
                            tst_name_mod = tst_name_mod.replace("2n1l-", "")
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][item["title"
                                ]].append(tst_data["result"]["receive-rate"].
                                          avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "PDR"]["LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "NDR"]["LOWER"])
                            else:
                                continue
                        except (TypeError, KeyError):
                            pass

    # Build one row per test: [name, (history mean/stdev)*, ref mean/stdev,
    # cmp mean/stdev, delta-or-remark]. Values are converted to Mpps.
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend(["Not tested", "Not tested"])
            else:
                item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend(["Not tested", "Not tested"])
        # item[-2] is the compare mean, item[-4] the reference mean.
        if item[-2] == "Not tested":
            pass
        elif item[-4] == "Not tested":
            item.append("New in CSIT-1908")
        elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
            item.append("See footnote [1]")
            footnote = True
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only complete rows whose compare mean is present.
        if (len(item) == len(header)) and (item[-3] != "Not tested"):
            tbl_lst.append(item)

    # Sort the table:
    # 1. New in CSIT-XXXX
    # 2. See footnote
    # 3. Delta
    tbl_new = list()
    tbl_see = list()
    tbl_delta = list()
    for item in tbl_lst:
        if isinstance(item[-1], str):
            if "New in CSIT" in item[-1]:
                tbl_new.append(item)
            elif "See footnote" in item[-1]:
                tbl_see.append(item)
        else:
            tbl_delta.append(item)

    # Sort the tables:
    tbl_new.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)

    # Put the tables together:
    tbl_lst = list()
    tbl_lst.extend(tbl_new)
    tbl_lst.extend(tbl_see)
    tbl_lst.extend(tbl_delta)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    txt_file_name = "{0}.txt".format(table["output-file"])
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # Append the dot1q methodology footnote only when referenced by a row.
    if footnote:
        with open(txt_file_name, 'a') as txt_file:
            txt_file.writelines([
                "\nFootnotes:\n",
                "[1] CSIT-1908 changed test methodology of dot1q tests in "
                "2-node testbeds, dot1q encapsulation is now used on both "
                "links of SUT.\n",
                "    Previously dot1q was used only on a single link with the "
                "other link carrying untagged Ethernet frames. This changes "
                "results\n",
                "    in slightly lower throughput in CSIT-1908 for these "
                "tests. See release notes."
            ])
478
479 def table_performance_comparison_nic(table, input_data):
480     """Generate the table(s) with algorithm: table_performance_comparison
481     specified in the specification file.
482
483     :param table: Table to generate.
484     :param input_data: Data to process.
485     :type table: pandas.Series
486     :type input_data: InputData
487     """
488
489     logging.info("  Generating the table {0} ...".
490                  format(table.get("title", "")))
491
492     # Transform the data
493     logging.info("    Creating the data set for the {0} '{1}'.".
494                  format(table.get("type", ""), table.get("title", "")))
495     data = input_data.filter_data(table, continue_on_error=True)
496
497     # Prepare the header of the tables
498     try:
499         header = ["Test case", ]
500
501         if table["include-tests"] == "MRR":
502             hdr_param = "Rec Rate"
503         else:
504             hdr_param = "Thput"
505
506         history = table.get("history", None)
507         if history:
508             for item in history:
509                 header.extend(
510                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
511                      "{0} Stdev [Mpps]".format(item["title"])])
512         header.extend(
513             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
514              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
515              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
516              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
517              "Delta [%]"])
518         header_str = ",".join(header) + "\n"
519     except (AttributeError, KeyError) as err:
520         logging.error("The model is invalid, missing parameter: {0}".
521                       format(err))
522         return
523
524     # Prepare data to the table:
525     tbl_dict = dict()
526     for job, builds in table["reference"]["data"].items():
527         topo = "2n-skx" if "2n-skx" in job else ""
528         for build in builds:
529             for tst_name, tst_data in data[job][str(build)].iteritems():
530                 if table["reference"]["nic"] not in tst_data["tags"]:
531                     continue
532                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
533                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
534                     replace("-ndrdisc", "").replace("-pdr", "").\
535                     replace("-ndr", "").\
536                     replace("1t1c", "1c").replace("2t1c", "1c").\
537                     replace("2t2c", "2c").replace("4t2c", "2c").\
538                     replace("4t4c", "4c").replace("8t4c", "4c")
539                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
540                 if "across topologies" in table["title"].lower():
541                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
542                 if tbl_dict.get(tst_name_mod, None) is None:
543                     name = "{0}".format("-".join(tst_data["name"].
544                                                  split("-")[:-1]))
545                     if "across testbeds" in table["title"].lower() or \
546                             "across topologies" in table["title"].lower():
547                         name = name.\
548                             replace("1t1c", "1c").replace("2t1c", "1c").\
549                             replace("2t2c", "2c").replace("4t2c", "2c").\
550                             replace("4t4c", "4c").replace("8t4c", "4c")
551                     tbl_dict[tst_name_mod] = {"name": name,
552                                               "ref-data": list(),
553                                               "cmp-data": list()}
554                 try:
555                     # TODO: Re-work when NDRPDRDISC tests are not used
556                     if table["include-tests"] == "MRR":
557                         tbl_dict[tst_name_mod]["ref-data"]. \
558                             append(tst_data["result"]["receive-rate"].avg)
559                     elif table["include-tests"] == "PDR":
560                         if tst_data["type"] == "PDR":
561                             tbl_dict[tst_name_mod]["ref-data"]. \
562                                 append(tst_data["throughput"]["value"])
563                         elif tst_data["type"] == "NDRPDR":
564                             tbl_dict[tst_name_mod]["ref-data"].append(
565                                 tst_data["throughput"]["PDR"]["LOWER"])
566                     elif table["include-tests"] == "NDR":
567                         if tst_data["type"] == "NDR":
568                             tbl_dict[tst_name_mod]["ref-data"]. \
569                                 append(tst_data["throughput"]["value"])
570                         elif tst_data["type"] == "NDRPDR":
571                             tbl_dict[tst_name_mod]["ref-data"].append(
572                                 tst_data["throughput"]["NDR"]["LOWER"])
573                     else:
574                         continue
575                 except TypeError:
576                     pass  # No data in output.xml for this test
577
578     for job, builds in table["compare"]["data"].items():
579         for build in builds:
580             for tst_name, tst_data in data[job][str(build)].iteritems():
581                 if table["compare"]["nic"] not in tst_data["tags"]:
582                     continue
583                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
584                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
585                     replace("-ndrdisc", "").replace("-pdr", ""). \
586                     replace("-ndr", "").\
587                     replace("1t1c", "1c").replace("2t1c", "1c").\
588                     replace("2t2c", "2c").replace("4t2c", "2c").\
589                     replace("4t4c", "4c").replace("8t4c", "4c")
590                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
591                 if "across topologies" in table["title"].lower():
592                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
593                 if tbl_dict.get(tst_name_mod, None) is None:
594                     name = "{0}".format("-".join(tst_data["name"].
595                                                  split("-")[:-1]))
596                     if "across testbeds" in table["title"].lower() or \
597                             "across topologies" in table["title"].lower():
598                         name = name.\
599                             replace("1t1c", "1c").replace("2t1c", "1c").\
600                             replace("2t2c", "2c").replace("4t2c", "2c").\
601                             replace("4t4c", "4c").replace("8t4c", "4c")
602                     tbl_dict[tst_name_mod] = {"name": name,
603                                               "ref-data": list(),
604                                               "cmp-data": list()}
605                 try:
606                     # TODO: Re-work when NDRPDRDISC tests are not used
607                     if table["include-tests"] == "MRR":
608                         tbl_dict[tst_name_mod]["cmp-data"]. \
609                             append(tst_data["result"]["receive-rate"].avg)
610                     elif table["include-tests"] == "PDR":
611                         if tst_data["type"] == "PDR":
612                             tbl_dict[tst_name_mod]["cmp-data"]. \
613                                 append(tst_data["throughput"]["value"])
614                         elif tst_data["type"] == "NDRPDR":
615                             tbl_dict[tst_name_mod]["cmp-data"].append(
616                                 tst_data["throughput"]["PDR"]["LOWER"])
617                     elif table["include-tests"] == "NDR":
618                         if tst_data["type"] == "NDR":
619                             tbl_dict[tst_name_mod]["cmp-data"]. \
620                                 append(tst_data["throughput"]["value"])
621                         elif tst_data["type"] == "NDRPDR":
622                             tbl_dict[tst_name_mod]["cmp-data"].append(
623                                 tst_data["throughput"]["NDR"]["LOWER"])
624                     else:
625                         continue
626                 except (KeyError, TypeError):
627                     pass
628
629     if history:
630         for item in history:
631             for job, builds in item["data"].items():
632                 for build in builds:
633                     for tst_name, tst_data in data[job][str(build)].iteritems():
634                         if item["nic"] not in tst_data["tags"]:
635                             continue
636                         tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
637                             replace("-ndrpdr", "").replace("-pdrdisc", ""). \
638                             replace("-ndrdisc", "").replace("-pdr", ""). \
639                             replace("-ndr", "").\
640                             replace("1t1c", "1c").replace("2t1c", "1c").\
641                             replace("2t2c", "2c").replace("4t2c", "2c").\
642                             replace("4t4c", "4c").replace("8t4c", "4c")
643                         tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
644                         if "across topologies" in table["title"].lower():
645                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
646                         if tbl_dict.get(tst_name_mod, None) is None:
647                             continue
648                         if tbl_dict[tst_name_mod].get("history", None) is None:
649                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
650                         if tbl_dict[tst_name_mod]["history"].get(item["title"],
651                                                              None) is None:
652                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
653                                 list()
654                         try:
655                             # TODO: Re-work when NDRPDRDISC tests are not used
656                             if table["include-tests"] == "MRR":
657                                 tbl_dict[tst_name_mod]["history"][item["title"
658                                 ]].append(tst_data["result"]["receive-rate"].
659                                           avg)
660                             elif table["include-tests"] == "PDR":
661                                 if tst_data["type"] == "PDR":
662                                     tbl_dict[tst_name_mod]["history"][
663                                         item["title"]].\
664                                         append(tst_data["throughput"]["value"])
665                                 elif tst_data["type"] == "NDRPDR":
666                                     tbl_dict[tst_name_mod]["history"][item[
667                                         "title"]].append(tst_data["throughput"][
668                                         "PDR"]["LOWER"])
669                             elif table["include-tests"] == "NDR":
670                                 if tst_data["type"] == "NDR":
671                                     tbl_dict[tst_name_mod]["history"][
672                                         item["title"]].\
673                                         append(tst_data["throughput"]["value"])
674                                 elif tst_data["type"] == "NDRPDR":
675                                     tbl_dict[tst_name_mod]["history"][item[
676                                         "title"]].append(tst_data["throughput"][
677                                         "NDR"]["LOWER"])
678                             else:
679                                 continue
680                         except (TypeError, KeyError):
681                             pass
682
683     tbl_lst = list()
684     footnote = False
685     for tst_name in tbl_dict.keys():
686         item = [tbl_dict[tst_name]["name"], ]
687         if history:
688             if tbl_dict[tst_name].get("history", None) is not None:
689                 for hist_data in tbl_dict[tst_name]["history"].values():
690                     if hist_data:
691                         item.append(round(mean(hist_data) / 1000000, 2))
692                         item.append(round(stdev(hist_data) / 1000000, 2))
693                     else:
694                         item.extend(["Not tested", "Not tested"])
695             else:
696                 item.extend(["Not tested", "Not tested"])
697         data_t = tbl_dict[tst_name]["ref-data"]
698         if data_t:
699             item.append(round(mean(data_t) / 1000000, 2))
700             item.append(round(stdev(data_t) / 1000000, 2))
701         else:
702             item.extend(["Not tested", "Not tested"])
703         data_t = tbl_dict[tst_name]["cmp-data"]
704         if data_t:
705             item.append(round(mean(data_t) / 1000000, 2))
706             item.append(round(stdev(data_t) / 1000000, 2))
707         else:
708             item.extend(["Not tested", "Not tested"])
709         if item[-2] == "Not tested":
710             pass
711         elif item[-4] == "Not tested":
712             item.append("New in CSIT-1908")
713         elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
714             item.append("See footnote [1]")
715             footnote = True
716         elif item[-4] != 0:
717             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
718         if (len(item) == len(header)) and (item[-3] != "Not tested"):
719             tbl_lst.append(item)
720
721     # Sort the table:
722     # 1. New in CSIT-XXXX
723     # 2. See footnote
724     # 3. Delta
725     tbl_new = list()
726     tbl_see = list()
727     tbl_delta = list()
728     for item in tbl_lst:
729         if isinstance(item[-1], str):
730             if "New in CSIT" in item[-1]:
731                 tbl_new.append(item)
732             elif "See footnote" in item[-1]:
733                 tbl_see.append(item)
734         else:
735             tbl_delta.append(item)
736
737     # Sort the tables:
738     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
739     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
740     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
741     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
742
743     # Put the tables together:
744     tbl_lst = list()
745     tbl_lst.extend(tbl_new)
746     tbl_lst.extend(tbl_see)
747     tbl_lst.extend(tbl_delta)
748
749     # Generate csv tables:
750     csv_file = "{0}.csv".format(table["output-file"])
751     with open(csv_file, "w") as file_handler:
752         file_handler.write(header_str)
753         for test in tbl_lst:
754             file_handler.write(",".join([str(item) for item in test]) + "\n")
755
756     txt_file_name = "{0}.txt".format(table["output-file"])
757     convert_csv_to_pretty_txt(csv_file, txt_file_name)
758
759     if footnote:
760         with open(txt_file_name, 'a') as txt_file:
761             txt_file.writelines([
762                 "\nFootnotes:\n",
763                 "[1] CSIT-1908 changed test methodology of dot1q tests in "
764                 "2-node testbeds, dot1q encapsulation is now used on both "
765                 "links of SUT.\n",
766                 "    Previously dot1q was used only on a single link with the "
767                 "other link carrying untagged Ethernet frames. This changes "
768                 "results\n",
769                 "    in slightly lower throughput in CSIT-1908 for these "
770                 "tests. See release notes."
771             ])
772
773
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Aggregates per-test throughput (or receive rate) for two NICs and
    writes a comparison table (mean, stdev, delta) as CSV and pretty txt.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Build the table header; bail out if the table spec is incomplete.
    try:
        hdr_param = "Rec Rate" if table["include-tests"] == "MRR" \
            else "Thput"
        header = [
            "Test case",
            "{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
            "{0} Stdev [Mpps]".format(table["reference"]["title"]),
            "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
            "{0} Stdev [Mpps]".format(table["compare"]["title"]),
            "Delta [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Collect results keyed by a normalized test name (search-type suffix,
    # thread/core tag and NIC removed) so the same test on both NICs maps
    # to one row.
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name
                for old, new in (("-ndrpdrdisc", ""), ("-ndrpdr", ""),
                                 ("-pdrdisc", ""), ("-ndrdisc", ""),
                                 ("-pdr", ""), ("-ndr", ""),
                                 ("1t1c", "1c"), ("2t1c", "1c"),
                                 ("2t2c", "2c"), ("4t2c", "2c"),
                                 ("4t4c", "4c"), ("8t4c", "4c")):
                    tst_name_mod = tst_name_mod.replace(old, new)
                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                if tst_name_mod not in tbl_dict:
                    tbl_dict[tst_name_mod] = {
                        "name": "-".join(tst_data["name"].split("-")[:-1]),
                        "ref-data": list(),
                        "cmp-data": list()}
                try:
                    kind = table["include-tests"]
                    if kind == "MRR":
                        rate = tst_data["result"]["receive-rate"].avg
                    elif kind == "PDR":
                        rate = tst_data["throughput"]["PDR"]["LOWER"]
                    elif kind == "NDR":
                        rate = tst_data["throughput"]["NDR"]["LOWER"]
                    else:
                        rate = None

                    if rate:
                        # The NIC tag decides which side of the comparison
                        # this sample belongs to.
                        if table["reference"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["ref-data"].append(rate)
                        elif table["compare"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["cmp-data"].append(rate)
                except (TypeError, KeyError) as err:
                    # No data in output.xml for this test
                    logging.debug("No data for {0}".format(tst_name))
                    logging.debug(repr(err))

    # Build the rows: mean and stdev (in Mpps) for each side, then the
    # relative change. Rows missing either side are dropped by the final
    # length check.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        row = [tbl_dict[tst_name]["name"], ]
        for side in ("ref-data", "cmp-data"):
            samples = tbl_dict[tst_name][side]
            if samples:
                row.append(round(mean(samples) / 1000000, 2))
                row.append(round(stdev(samples) / 1000000, 2))
            else:
                row.extend([None, None])
        if row[-4] is not None and row[-2] is not None and row[-4] != 0:
            row.append(int(relative_change(float(row[-4]), float(row[-2]))))
        if len(row) == len(header):
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
882
883
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Compares throughput of soak tests ("compare" side) against the
    corresponding NDR/PDR/MRR reference results and writes the comparison
    (mean, stdev, delta and stdev of delta) to CSV and pretty txt files.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table; abort if the spec lacks the
    # reference/compare titles.
    try:
        header = [
            "Test case",
            "{0} Thput [Mpps]".format(table["reference"]["title"]),
            "{0} Stdev [Mpps]".format(table["reference"]["title"]),
            "{0} Thput [Mpps]".format(table["compare"]["title"]),
            "{0} Stdev [Mpps]".format(table["compare"]["title"]),
            "Delta [%]", "Stdev of delta [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Create a list of available SOAK test results:
    # Keys are test names with the "-soak" suffix stripped so they can be
    # matched against the reference (NDR/MRR) test names later.
    tbl_dict = dict()
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_data["type"] == "SOAK":
                    tst_name_mod = tst_name.replace("-soak", "")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        # Display name: NIC (extracted from the parent suite
                        # name, empty if not found) + test name without its
                        # last dash-separated component.
                        groups = re.search(REGEX_NIC, tst_data["parent"])
                        nic = groups.group(0) if groups else ""
                        name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                              split("-")[:-1]))
                        tbl_dict[tst_name_mod] = {
                            "name": name,
                            "ref-data": list(),
                            "cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod]["cmp-data"].append(
                            tst_data["throughput"]["LOWER"])
                    except (KeyError, TypeError):
                        # Incomplete result for this test; skip the sample.
                        pass
    # Only tests which produced a SOAK result are considered further.
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Strip the search-type suffix so the name matches the
                # SOAK-derived key created above.
                tst_name_mod = tst_name.replace("-ndrpdr", "").\
                    replace("-mrr", "")
                if tst_name_mod in tests_lst:
                    try:
                        if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
                            # Pick the metric requested by the table spec.
                            if table["include-tests"] == "MRR":
                                result = tst_data["result"]["receive-rate"].avg
                            elif table["include-tests"] == "PDR":
                                result = tst_data["throughput"]["PDR"]["LOWER"]
                            elif table["include-tests"] == "NDR":
                                result = tst_data["throughput"]["NDR"]["LOWER"]
                            else:
                                result = None
                            if result is not None:
                                tbl_dict[tst_name_mod]["ref-data"].append(
                                    result)
                    except (KeyError, TypeError):
                        continue

    # Build the rows: mean and stdev in Mpps for both sides, then the
    # relative change and its stdev.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        data_r = tbl_dict[tst_name]["ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name]["cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # Rows reach the output only when both means are truthy; note that
        # a mean of exactly 0.0 is dropped the same way as missing data.
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
1005
1006
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    For every test, classifies its trend samples, computes short- and
    long-term relative change of the trend and writes a CSV / pretty txt
    dashboard sorted by number of regressions, then progressions, then
    short-term change.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table: map each test to its per-build
    # receive-rate samples (ordered by insertion, i.e. build order).
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Tests whose parent suite carries no NIC tag are
                    # skipped entirely.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        if len(data_t) < 2:
            # At least two samples are needed to evaluate a trend.
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        # Maximum of the long-term window averages (excluding the short
        # window); NaN when no valid sample exists in that span.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip tests without a computable trend. This single check
            # subsumes the former separate "both changes are NaN" test,
            # which was redundant and has been removed.
            if (isnan(last_avg) or
                    isnan(rel_change_last) or
                    isnan(rel_change_long)):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order rows by regressions (desc), then progressions (desc), then
    # short-term change (asc) within each bucket.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1125
1126
def _generate_url(base, testbed, test_name):
    """Generate URL to a trending plot from the name of the test case.

    :param base: The base part of URL common to all test cases.
    :param testbed: The testbed used for testing.
    :param test_name: The name of the test case.
    :type base: str
    :type testbed: str
    :type test_name: str
    :returns: The URL to the plot with the trending data for the given test
        case.
    :rtype: str
    """

    file_name = ""
    feature = ""

    # Map the test name onto the target page (file_name) and, where
    # applicable, a feature suffix. The order of the checks matters:
    # the first matching branch wins.
    if "lbdpdk" in test_name or "lbvpp" in test_name:
        file_name = "link_bonding"

    elif "114b" in test_name and "vhost" in test_name:
        file_name = "vts"

    elif "testpmd" in test_name or "l3fwd" in test_name:
        file_name = "dpdk"

    elif "memif" in test_name:
        file_name = "container_memif"
        feature = "-base"

    elif "srv6" in test_name:
        file_name = "srv6"

    elif "vhost" in test_name:
        if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
            file_name = "vm_vhost_l2"
            if "114b" in test_name:
                feature = ""
            elif "l2xcbase" in test_name and "x520" in test_name:
                feature = "-base-l2xc"
            elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
                feature = "-base-l2bd"
            else:
                feature = "-base"
        elif "ip4base" in test_name:
            file_name = "vm_vhost_ip4"
            feature = "-base"

    elif "ipsecbasetnlsw" in test_name:
        file_name = "ipsecsw"
        feature = "-base-scale"

    elif "ipsec" in test_name:
        file_name = "ipsec"
        feature = "-base-scale"
        if "hw-" in test_name:
            file_name = "ipsechw"
        elif "sw-" in test_name:
            file_name = "ipsecsw"
        if "-int-" in test_name:
            feature = "-base-scale-int"
        elif "tnl" in test_name:
            feature = "-base-scale-tnl"

    elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
        file_name = "ip4_tunnels"
        feature = "-base"

    elif "ip4base" in test_name or "ip4scale" in test_name:
        file_name = "ip4"
        if "xl710" in test_name:
            feature = "-base-scale-features"
        elif "iacl" in test_name:
            feature = "-features-iacl"
        elif "oacl" in test_name:
            feature = "-features-oacl"
        elif "snat" in test_name or "cop" in test_name:
            feature = "-features"
        else:
            feature = "-base-scale"

    elif "ip6base" in test_name or "ip6scale" in test_name:
        file_name = "ip6"
        feature = "-base-scale"

    elif "l2xcbase" in test_name or "l2xcscale" in test_name \
            or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
            or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
        file_name = "l2"
        if "macip" in test_name:
            feature = "-features-macip"
        elif "iacl" in test_name:
            feature = "-features-iacl"
        elif "oacl" in test_name:
            feature = "-features-oacl"
        else:
            feature = "-base-scale"

    # NIC tag: first match wins (tuple order mirrors the original elif
    # chain); empty string when no tag matches.
    nic = ""
    for nic_tag in ("x520", "x710", "xl710", "xxv710", "vic1227", "vic1385",
                    "x553"):
        if nic_tag in test_name:
            nic = nic_tag + "-"
            break

    # Frame size: first match wins, again preserving the original order
    # of the checks.
    framesize = ""
    for size_tag in ("64b", "78b", "imix", "9000b", "1518b", "114b"):
        if size_tag in test_name:
            framesize = size_tag
            break

    # Threads/cores suffix of the anchor; empty when no tag matches.
    cores = ""
    for thread_tag in ("1t1c", "2t2c", "4t4c", "2t1c", "4t2c", "8t4c"):
        if thread_tag in test_name:
            cores = thread_tag
            break

    anchor = ".html#" + nic + framesize + "-" + cores

    # The page file name drops the "-int"/"-tnl" qualifier; the anchor
    # keeps the full feature string.
    return (base + file_name + "-" + testbed + "-" + nic + framesize
            + feature.replace("-int", "").replace("-tnl", "")
            + anchor + feature)
1276
1277
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the dashboard CSV and renders it as an HTML table embedded in a
    reStructuredText "raw" directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Load the previously generated dashboard CSV.
    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row built from the first CSV line.
    hdr_row = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for col_idx, title in enumerate(csv_lst[0]):
        cell = ET.SubElement(
            hdr_row, "th",
            attrib=dict(align="left" if col_idx == 0 else "center"))
        cell.text = title

    # Data rows. The colour class is chosen from the regressions column
    # (index 4) and progressions column (index 5) of the CSV; the two
    # shades per class alternate between odd and even rows.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        else:
            color = "normal"
        tr = ET.SubElement(
            dashboard, "tr",
            attrib=dict(bgcolor=colors[color][r_idx % 2]))

        # Columns:
        for c_idx, item in enumerate(row):
            cell = ET.SubElement(
                tr, "td",
                attrib=dict(align="left" if c_idx == 0 else "center"))
            if c_idx == 0:
                # The first column is the test name linking to its
                # trending graph.
                link = ET.SubElement(
                    cell, "a",
                    attrib=dict(href=_generate_url("../trending/",
                                                   testbed, item)))
                link.text = item
            else:
                cell.text = item

    # Wrap the table in an rST raw-html directive and write it out.
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
1354
1355
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    Writes, per build, the build number, the tested version and the names
    (prefixed with the NIC from the parent suite) of all failed tests.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        # logging.warn is a deprecated alias; use logging.warning.
        logging.warning("    No data for the {0} '{1}'.".
                        format(table.get("type", ""), table.get("title", "")))
        return

    tbl_list = list()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get("version", "")
            except KeyError:
                logging.error("Data for {job}: {build} is not present.".
                              format(job=job, build=build))
                return
            tbl_list.append(build)
            tbl_list.append(version)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_data["status"] != "FAIL":
                    continue
                # Tests whose parent suite carries no NIC tag are skipped.
                groups = re.search(REGEX_NIC, tst_data["parent"])
                if not groups:
                    continue
                nic = groups.group(0)
                tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        for test in tbl_list:
            file_handler.write(test + '\n')
1405
1406
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Counts, per test, the failures observed within the configured time
    window and writes a CSV (plus a pretty-printed TXT copy) sorted by the
    number of failures (descending), then by the last failure date.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # filter_data(..., continue_on_error=True) can yield None / empty data;
    # the sibling table_* functions guard against it, this one did not and
    # would crash below on data[job][build].
    if data is None or data.empty:
        logging.warn("    No data for the {0} '{1}'.".
                     format(table.get("type", ""), table.get("title", "")))
        return

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification. Only builds generated within the "window" (days,
    # default 7) are taken into account.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get("window", 7)))

    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # The NIC is parsed out of the suite (parent) name;
                    # tests without a NIC token are skipped entirely.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    generated = input_data.metadata(job, build).\
                        get("generated", "")
                    if not generated:
                        continue
                    then = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name]["data"][build] = (
                            tst_data["status"],
                            generated,
                            input_data.metadata(job, build).get("version", ""),
                            build)
                except (TypeError, KeyError) as err:
                    logging.warning("tst_name: {} - err: {}".
                                    format(tst_name, repr(err)))

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        # Initialized so a logic change above can never leave them unbound;
        # with the current flow they are always set before use (guarded by
        # fails_nr below).
        fails_last_date = fails_last_vpp = fails_last_csit = ""
        for val in tst_data["data"].values():
            if val[0] == "FAIL":
                fails_nr += 1
                # "data" is an OrderedDict keyed by build, so after the loop
                # these hold the most recently added failure.
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = max(max_fails, fails_nr)
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    # Sort by last-failure date descending first; the stable grouping pass
    # below then orders primarily by failure count descending.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_sorted.extend(item for item in tbl_lst if item[1] == nrf)
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1503
1504
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV produced by table_failed_tests and renders it as an HTML
    table embedded in a reStructuredText ".. raw:: html" block, with the
    test-name column linked to the trending pages.

    :param table: Table to generate.
    :param input_data: Data to process (not used by this algorithm).
    :type table: pandas.Series
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        # Binary mode: the Python 2 csv module requires it.
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = list(csv_content)
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # An empty input file would crash below on csv_lst[0].
    if not csv_lst:
        logging.warning("No data in the file '{0}'.".
                        format(table.get("input-file", "")))
        return

    # Table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows, with alternating background colors:
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name: the first column links to the trending page of the test.
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return