CSIT-1590: Performance comparison analysis
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
28
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30     convert_csv_to_pretty_txt, relative_change_stdev
31
32
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
34
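# Illustrative example of what REGEX_NIC extracts from a suite/test name
# (the name below is hypothetical):
#
#     >>> REGEX_NIC.search("10ge2p1x520-ethip4-ip4base-ndrpdr").group(0)
#     '10ge2p1x520'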
35
36 def generate_tables(spec, data):
37     """Generate all tables specified in the specification file.
38
39     :param spec: Specification read from the specification file.
40     :param data: Data to process.
41     :type spec: Specification
42     :type data: InputData
43     """
44
45     logging.info("Generating the tables ...")
46     for table in spec.tables:
47         try:
48             eval(table["algorithm"])(table, data)
49         except NameError as err:
50             logging.error("Algorithm '{alg}' is probably not defined: {err}".
51                           format(alg=table["algorithm"], err=repr(err)))
52     logging.info("Done.")
53
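# A minimal sketch of the dispatch done by generate_tables(): the "algorithm"
# value in the specification must be the name of a function defined in this
# module (the table entry below is illustrative only, not from a real spec):
#
#     table = {"algorithm": "table_details",
#              "title": "Detailed Test Results",
#              "output-file": "...",
#              "output-file-ext": ".csv"}
#     eval(table["algorithm"])(table, data)  # calls table_details(table, data)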
54
55 def table_details(table, input_data):
56     """Generate the table(s) with algorithm: table_details
57     specified in the specification file.
58
59     :param table: Table to generate.
60     :param input_data: Data to process.
61     :type table: pandas.Series
62     :type input_data: InputData
63     """
64
65     logging.info("  Generating the table {0} ...".
66                  format(table.get("title", "")))
67
68     # Transform the data
69     logging.info("    Creating the data set for the {0} '{1}'.".
70                  format(table.get("type", ""), table.get("title", "")))
71     data = input_data.filter_data(table)
72
73     # Prepare the header of the tables
74     header = list()
75     for column in table["columns"]:
76         header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
77
78     # Generate the data for the table according to the model in the table
79     # specification
80     job = table["data"].keys()[0]
81     build = str(table["data"][job][0])
82     try:
83         suites = input_data.suites(job, build)
84     except KeyError:
85         logging.error("    No data available. The table will not be generated.")
86         return
87
88     for suite_longname, suite in suites.iteritems():
89         # Generate data
90         suite_name = suite["name"]
91         table_lst = list()
92         for test in data[job][build].keys():
93             if data[job][build][test]["parent"] in suite_name:
94                 row_lst = list()
95                 for column in table["columns"]:
96                     try:
97                         col_data = str(data[job][build][test][column["data"].
98                                        split(" ")[1]]).replace('"', '""')
99                         if column["data"].split(" ")[1] in ("conf-history",
100                                                             "show-run"):
101                             col_data = replace(col_data, " |br| ", "",
102                                                maxreplace=1)
103                             col_data = " |prein| {0} |preout| ".\
104                                 format(col_data[:-5])
105                         row_lst.append('"{0}"'.format(col_data))
106                     except KeyError:
107                         row_lst.append("No data")
108                 table_lst.append(row_lst)
109
110         # Write the data to file
111         if table_lst:
112             file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
113                                             table["output-file-ext"])
114             logging.info("      Writing file: '{}'".format(file_name))
115             with open(file_name, "w") as file_handler:
116                 file_handler.write(",".join(header) + "\n")
117                 for item in table_lst:
118                     file_handler.write(",".join(item) + "\n")
119
120     logging.info("  Done.")
121
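# Note on the column specification used above: the "data" value is expected to
# hold two space-separated tokens and only the second one is used as the key
# into the test data (the column below is hypothetical):
#
#     >>> column = {"title": "Documentation", "data": "data doc"}
#     >>> column["data"].split(" ")[1]
#     'doc'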
122
123 def table_merged_details(table, input_data):
124     """Generate the table(s) with algorithm: table_merged_details
125     specified in the specification file.
126
127     :param table: Table to generate.
128     :param input_data: Data to process.
129     :type table: pandas.Series
130     :type input_data: InputData
131     """
132
133     logging.info("  Generating the table {0} ...".
134                  format(table.get("title", "")))
135
136     # Transform the data
137     logging.info("    Creating the data set for the {0} '{1}'.".
138                  format(table.get("type", ""), table.get("title", "")))
139     data = input_data.filter_data(table)
140     data = input_data.merge_data(data)
141     data.sort_index(inplace=True)
142
143     logging.info("    Creating the data set for the {0} '{1}'.".
144                  format(table.get("type", ""), table.get("title", "")))
145     suites = input_data.filter_data(table, data_set="suites")
146     suites = input_data.merge_data(suites)
147
148     # Prepare the header of the tables
149     header = list()
150     for column in table["columns"]:
151         header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
152
153     for _, suite in suites.iteritems():
154         # Generate data
155         suite_name = suite["name"]
156         table_lst = list()
157         for test in data.keys():
158             if data[test]["parent"] in suite_name:
159                 row_lst = list()
160                 for column in table["columns"]:
161                     try:
162                         col_data = str(data[test][column["data"].
163                                        split(" ")[1]]).replace('"', '""')
164                         col_data = replace(col_data, "No Data",
165                                            "Not Captured     ")
166                         if column["data"].split(" ")[1] in ("conf-history",
167                                                             "show-run"):
168                             col_data = replace(col_data, " |br| ", "",
169                                                maxreplace=1)
170                             col_data = " |prein| {0} |preout| ".\
171                                 format(col_data[:-5])
172                         row_lst.append('"{0}"'.format(col_data))
173                     except KeyError:
174                         row_lst.append('"Not captured"')
175                 table_lst.append(row_lst)
176
177         # Write the data to file
178         if table_lst:
179             file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
180                                             table["output-file-ext"])
181             logging.info("      Writing file: '{}'".format(file_name))
182             with open(file_name, "w") as file_handler:
183                 file_handler.write(",".join(header) + "\n")
184                 for item in table_lst:
185                     file_handler.write(",".join(item) + "\n")
186
187     logging.info("  Done.")
188
189
190 def table_performance_comparison(table, input_data):
191     """Generate the table(s) with algorithm: table_performance_comparison
192     specified in the specification file.
193
194     :param table: Table to generate.
195     :param input_data: Data to process.
196     :type table: pandas.Series
197     :type input_data: InputData
198     """
199
200     logging.info("  Generating the table {0} ...".
201                  format(table.get("title", "")))
202
203     # Transform the data
204     logging.info("    Creating the data set for the {0} '{1}'.".
205                  format(table.get("type", ""), table.get("title", "")))
206     data = input_data.filter_data(table, continue_on_error=True)
207
208     # Prepare the header of the tables
209     try:
210         header = ["Test case", ]
211
212         if table["include-tests"] == "MRR":
213             hdr_param = "Receive Rate"
214         else:
215             hdr_param = "Throughput"
216
217         history = table.get("history", None)
218         if history:
219             for item in history:
220                 header.extend(
221                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
222                      "{0} Stdev [Mpps]".format(item["title"])])
223         header.extend(
224             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
225              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
226              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
227              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
228              "Delta [%]"])
229         header_str = ",".join(header) + "\n"
230     except (AttributeError, KeyError) as err:
231         logging.error("The model is invalid, missing parameter: {0}".
232                       format(err))
233         return
234
235     # Prepare data for the table:
236     tbl_dict = dict()
237     for job, builds in table["reference"]["data"].items():
238         for build in builds:
239             for tst_name, tst_data in data[job][str(build)].iteritems():
240                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
241                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
242                     replace("-ndrdisc", "").replace("-pdr", "").\
243                     replace("-ndr", "").\
244                     replace("1t1c", "1c").replace("2t1c", "1c").\
245                     replace("2t2c", "2c").replace("4t2c", "2c").\
246                     replace("4t4c", "4c").replace("8t4c", "4c")
247                 if "across topologies" in table["title"].lower():
248                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
249                 if tbl_dict.get(tst_name_mod, None) is None:
250                     groups = re.search(REGEX_NIC, tst_data["parent"])
251                     nic = groups.group(0) if groups else ""
252                     name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
253                                                           split("-")[:-1]))
254                     if "across testbeds" in table["title"].lower() or \
255                             "across topologies" in table["title"].lower():
256                         name = name.\
257                             replace("1t1c", "1c").replace("2t1c", "1c").\
258                             replace("2t2c", "2c").replace("4t2c", "2c").\
259                             replace("4t4c", "4c").replace("8t4c", "4c")
260                     tbl_dict[tst_name_mod] = {"name": name,
261                                               "ref-data": list(),
262                                               "cmp-data": list()}
263                 try:
264                     # TODO: Re-work when NDRPDRDISC tests are not used
265                     if table["include-tests"] == "MRR":
266                         tbl_dict[tst_name_mod]["ref-data"]. \
267                             append(tst_data["result"]["receive-rate"].avg)
268                     elif table["include-tests"] == "PDR":
269                         if tst_data["type"] == "PDR":
270                             tbl_dict[tst_name_mod]["ref-data"]. \
271                                 append(tst_data["throughput"]["value"])
272                         elif tst_data["type"] == "NDRPDR":
273                             tbl_dict[tst_name_mod]["ref-data"].append(
274                                 tst_data["throughput"]["PDR"]["LOWER"])
275                     elif table["include-tests"] == "NDR":
276                         if tst_data["type"] == "NDR":
277                             tbl_dict[tst_name_mod]["ref-data"]. \
278                                 append(tst_data["throughput"]["value"])
279                         elif tst_data["type"] == "NDRPDR":
280                             tbl_dict[tst_name_mod]["ref-data"].append(
281                                 tst_data["throughput"]["NDR"]["LOWER"])
282                     else:
283                         continue
284                 except TypeError:
285                     pass  # No data in output.xml for this test
286
287     for job, builds in table["compare"]["data"].items():
288         for build in builds:
289             for tst_name, tst_data in data[job][str(build)].iteritems():
290                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
291                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
292                     replace("-ndrdisc", "").replace("-pdr", ""). \
293                     replace("-ndr", "").\
294                     replace("1t1c", "1c").replace("2t1c", "1c").\
295                     replace("2t2c", "2c").replace("4t2c", "2c").\
296                     replace("4t4c", "4c").replace("8t4c", "4c")
297                 if "across topologies" in table["title"].lower():
298                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
299                 if tbl_dict.get(tst_name_mod, None) is None:
300                     groups = re.search(REGEX_NIC, tst_data["parent"])
301                     nic = groups.group(0) if groups else ""
302                     name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
303                                                           split("-")[:-1]))
304                     if "across testbeds" in table["title"].lower() or \
305                             "across topologies" in table["title"].lower():
306                         name = name.\
307                             replace("1t1c", "1c").replace("2t1c", "1c").\
308                             replace("2t2c", "2c").replace("4t2c", "2c").\
309                             replace("4t4c", "4c").replace("8t4c", "4c")
310                     tbl_dict[tst_name_mod] = {"name": name,
311                                               "ref-data": list(),
312                                               "cmp-data": list()}
313                 try:
314                     # TODO: Re-work when NDRPDRDISC tests are not used
315                     if table["include-tests"] == "MRR":
316                         tbl_dict[tst_name_mod]["cmp-data"]. \
317                             append(tst_data["result"]["receive-rate"].avg)
318                     elif table["include-tests"] == "PDR":
319                         if tst_data["type"] == "PDR":
320                             tbl_dict[tst_name_mod]["cmp-data"]. \
321                                 append(tst_data["throughput"]["value"])
322                         elif tst_data["type"] == "NDRPDR":
323                             tbl_dict[tst_name_mod]["cmp-data"].append(
324                                 tst_data["throughput"]["PDR"]["LOWER"])
325                     elif table["include-tests"] == "NDR":
326                         if tst_data["type"] == "NDR":
327                             tbl_dict[tst_name_mod]["cmp-data"]. \
328                                 append(tst_data["throughput"]["value"])
329                         elif tst_data["type"] == "NDRPDR":
330                             tbl_dict[tst_name_mod]["cmp-data"].append(
331                                 tst_data["throughput"]["NDR"]["LOWER"])
332                     else:
333                         continue
334                 except (KeyError, TypeError):
335                     pass
336     if history:
337         for item in history:
338             for job, builds in item["data"].items():
339                 for build in builds:
340                     for tst_name, tst_data in data[job][str(build)].iteritems():
341                         tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
342                             replace("-ndrpdr", "").replace("-pdrdisc", ""). \
343                             replace("-ndrdisc", "").replace("-pdr", ""). \
344                             replace("-ndr", "").\
345                             replace("1t1c", "1c").replace("2t1c", "1c").\
346                             replace("2t2c", "2c").replace("4t2c", "2c").\
347                             replace("4t4c", "4c").replace("8t4c", "4c")
348                         if "across topologies" in table["title"].lower():
349                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
350                         if tbl_dict.get(tst_name_mod, None) is None:
351                             continue
352                         if tbl_dict[tst_name_mod].get("history", None) is None:
353                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
354                         if tbl_dict[tst_name_mod]["history"].get(item["title"],
355                                                              None) is None:
356                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
357                                 list()
358                         try:
359                             # TODO: Re-work when NDRPDRDISC tests are not used
360                             if table["include-tests"] == "MRR":
361                                 tbl_dict[tst_name_mod]["history"][item["title"
362                                 ]].append(tst_data["result"]["receive-rate"].
363                                           avg)
364                             elif table["include-tests"] == "PDR":
365                                 if tst_data["type"] == "PDR":
366                                     tbl_dict[tst_name_mod]["history"][
367                                         item["title"]].\
368                                         append(tst_data["throughput"]["value"])
369                                 elif tst_data["type"] == "NDRPDR":
370                                     tbl_dict[tst_name_mod]["history"][item[
371                                         "title"]].append(tst_data["throughput"][
372                                         "PDR"]["LOWER"])
373                             elif table["include-tests"] == "NDR":
374                                 if tst_data["type"] == "NDR":
375                                     tbl_dict[tst_name_mod]["history"][
376                                         item["title"]].\
377                                         append(tst_data["throughput"]["value"])
378                                 elif tst_data["type"] == "NDRPDR":
379                                     tbl_dict[tst_name_mod]["history"][item[
380                                         "title"]].append(tst_data["throughput"][
381                                         "NDR"]["LOWER"])
382                             else:
383                                 continue
384                         except (TypeError, KeyError):
385                             pass
386
387     tbl_lst = list()
388     for tst_name in tbl_dict.keys():
389         item = [tbl_dict[tst_name]["name"], ]
390         if history:
391             if tbl_dict[tst_name].get("history", None) is not None:
392                 for hist_data in tbl_dict[tst_name]["history"].values():
393                     if hist_data:
394                         item.append(round(mean(hist_data) / 1000000, 2))
395                         item.append(round(stdev(hist_data) / 1000000, 2))
396                     else:
397                         item.extend([None, None])
398             else:
399                 item.extend([None, None])
400         data_t = tbl_dict[tst_name]["ref-data"]
401         if data_t:
402             item.append(round(mean(data_t) / 1000000, 2))
403             item.append(round(stdev(data_t) / 1000000, 2))
404         else:
405             item.extend([None, None])
406         data_t = tbl_dict[tst_name]["cmp-data"]
407         if data_t:
408             item.append(round(mean(data_t) / 1000000, 2))
409             item.append(round(stdev(data_t) / 1000000, 2))
410         else:
411             item.extend([None, None])
412         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
413             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
414         else:
415             item.append(None)
416         if len(item) == len(header):
417             tbl_lst.append(item)
418
419     # Sort the table according to the relative change
420     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
421
422     # Generate csv tables:
423     csv_file = "{0}.csv".format(table["output-file"])
424     with open(csv_file, "w") as file_handler:
425         file_handler.write(header_str)
426         for test in tbl_lst:
427             file_handler.write(",".join([str(item) for item in test]) + "\n")
428
429     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
430
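# Worked example of the test-name normalization above, which lets reference
# and compare samples land under the same key (hypothetical test name):
#
#     "64b-4t2c-ethip4-ip4base-ndrpdr"
#         -> test-type suffix stripped:   "64b-4t2c-ethip4-ip4base"
#         -> threads folded to cores:     "64b-2c-ethip4-ip4base"
#
# The displayed "name" is built separately: the NIC token found in the parent
# suite via REGEX_NIC is prepended, and the last dash-separated token of
# tst_data["name"] is dropped.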
431
432 def table_performance_comparison_nic(table, input_data):
433     """Generate the table(s) with algorithm: table_performance_comparison_nic
434     specified in the specification file.
435
436     :param table: Table to generate.
437     :param input_data: Data to process.
438     :type table: pandas.Series
439     :type input_data: InputData
440     """
441
442     logging.info("  Generating the table {0} ...".
443                  format(table.get("title", "")))
444
445     # Transform the data
446     logging.info("    Creating the data set for the {0} '{1}'.".
447                  format(table.get("type", ""), table.get("title", "")))
448     data = input_data.filter_data(table, continue_on_error=True)
449
450     # Prepare the header of the tables
451     try:
452         header = ["Test case", ]
453
454         if table["include-tests"] == "MRR":
455             hdr_param = "Receive Rate"
456         else:
457             hdr_param = "Throughput"
458
459         history = table.get("history", None)
460         if history:
461             for item in history:
462                 header.extend(
463                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
464                      "{0} Stdev [Mpps]".format(item["title"])])
465         header.extend(
466             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
467              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
468              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
469              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
470              "Delta [%]"])
471         header_str = ",".join(header) + "\n"
472     except (AttributeError, KeyError) as err:
473         logging.error("The model is invalid, missing parameter: {0}".
474                       format(err))
475         return
476
477     # Prepare data for the table:
478     tbl_dict = dict()
479     for job, builds in table["reference"]["data"].items():
480         for build in builds:
481             for tst_name, tst_data in data[job][str(build)].iteritems():
482                 if table["reference"]["nic"] not in tst_data["tags"]:
483                     continue
484                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
485                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
486                     replace("-ndrdisc", "").replace("-pdr", "").\
487                     replace("-ndr", "").\
488                     replace("1t1c", "1c").replace("2t1c", "1c").\
489                     replace("2t2c", "2c").replace("4t2c", "2c").\
490                     replace("4t4c", "4c").replace("8t4c", "4c")
491                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
492                 if "across topologies" in table["title"].lower():
493                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
494                 if tbl_dict.get(tst_name_mod, None) is None:
495                     name = "{0}".format("-".join(tst_data["name"].
496                                                  split("-")[:-1]))
497                     if "across testbeds" in table["title"].lower() or \
498                             "across topologies" in table["title"].lower():
499                         name = name.\
500                             replace("1t1c", "1c").replace("2t1c", "1c").\
501                             replace("2t2c", "2c").replace("4t2c", "2c").\
502                             replace("4t4c", "4c").replace("8t4c", "4c")
503                     tbl_dict[tst_name_mod] = {"name": name,
504                                               "ref-data": list(),
505                                               "cmp-data": list()}
506                 try:
507                     # TODO: Re-work when NDRPDRDISC tests are not used
508                     if table["include-tests"] == "MRR":
509                         tbl_dict[tst_name_mod]["ref-data"]. \
510                             append(tst_data["result"]["receive-rate"].avg)
511                     elif table["include-tests"] == "PDR":
512                         if tst_data["type"] == "PDR":
513                             tbl_dict[tst_name_mod]["ref-data"]. \
514                                 append(tst_data["throughput"]["value"])
515                         elif tst_data["type"] == "NDRPDR":
516                             tbl_dict[tst_name_mod]["ref-data"].append(
517                                 tst_data["throughput"]["PDR"]["LOWER"])
518                     elif table["include-tests"] == "NDR":
519                         if tst_data["type"] == "NDR":
520                             tbl_dict[tst_name_mod]["ref-data"]. \
521                                 append(tst_data["throughput"]["value"])
522                         elif tst_data["type"] == "NDRPDR":
523                             tbl_dict[tst_name_mod]["ref-data"].append(
524                                 tst_data["throughput"]["NDR"]["LOWER"])
525                     else:
526                         continue
527                 except TypeError:
528                     pass  # No data in output.xml for this test
529
530     for job, builds in table["compare"]["data"].items():
531         for build in builds:
532             for tst_name, tst_data in data[job][str(build)].iteritems():
533                 if table["compare"]["nic"] not in tst_data["tags"]:
534                     continue
535                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
536                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
537                     replace("-ndrdisc", "").replace("-pdr", ""). \
538                     replace("-ndr", "").\
539                     replace("1t1c", "1c").replace("2t1c", "1c").\
540                     replace("2t2c", "2c").replace("4t2c", "2c").\
541                     replace("4t4c", "4c").replace("8t4c", "4c")
542                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
543                 if "across topologies" in table["title"].lower():
544                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
545                 if tbl_dict.get(tst_name_mod, None) is None:
546                     name = "{0}".format("-".join(tst_data["name"].
547                                                  split("-")[:-1]))
548                     if "across testbeds" in table["title"].lower() or \
549                             "across topologies" in table["title"].lower():
550                         name = name.\
551                             replace("1t1c", "1c").replace("2t1c", "1c").\
552                             replace("2t2c", "2c").replace("4t2c", "2c").\
553                             replace("4t4c", "4c").replace("8t4c", "4c")
554                     tbl_dict[tst_name_mod] = {"name": name,
555                                               "ref-data": list(),
556                                               "cmp-data": list()}
557                 try:
558                     # TODO: Re-work when NDRPDRDISC tests are not used
559                     if table["include-tests"] == "MRR":
560                         tbl_dict[tst_name_mod]["cmp-data"]. \
561                             append(tst_data["result"]["receive-rate"].avg)
562                     elif table["include-tests"] == "PDR":
563                         if tst_data["type"] == "PDR":
564                             tbl_dict[tst_name_mod]["cmp-data"]. \
565                                 append(tst_data["throughput"]["value"])
566                         elif tst_data["type"] == "NDRPDR":
567                             tbl_dict[tst_name_mod]["cmp-data"].append(
568                                 tst_data["throughput"]["PDR"]["LOWER"])
569                     elif table["include-tests"] == "NDR":
570                         if tst_data["type"] == "NDR":
571                             tbl_dict[tst_name_mod]["cmp-data"]. \
572                                 append(tst_data["throughput"]["value"])
573                         elif tst_data["type"] == "NDRPDR":
574                             tbl_dict[tst_name_mod]["cmp-data"].append(
575                                 tst_data["throughput"]["NDR"]["LOWER"])
576                     else:
577                         continue
578                 except (KeyError, TypeError):
579                     pass
580
581     if history:
582         for item in history:
583             for job, builds in item["data"].items():
584                 for build in builds:
585                     for tst_name, tst_data in data[job][str(build)].iteritems():
586                         if item["nic"] not in tst_data["tags"]:
587                             continue
588                         tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
589                             replace("-ndrpdr", "").replace("-pdrdisc", ""). \
590                             replace("-ndrdisc", "").replace("-pdr", ""). \
591                             replace("-ndr", "").\
592                             replace("1t1c", "1c").replace("2t1c", "1c").\
593                             replace("2t2c", "2c").replace("4t2c", "2c").\
594                             replace("4t4c", "4c").replace("8t4c", "4c")
595                         tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
596                         if "across topologies" in table["title"].lower():
597                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
598                         if tbl_dict.get(tst_name_mod, None) is None:
599                             continue
600                         if tbl_dict[tst_name_mod].get("history", None) is None:
601                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
602                         if tbl_dict[tst_name_mod]["history"].get(item["title"],
603                                                              None) is None:
604                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
605                                 list()
606                         try:
607                             # TODO: Re-work when NDRPDRDISC tests are not used
608                             if table["include-tests"] == "MRR":
609                                 tbl_dict[tst_name_mod]["history"][item["title"
610                                 ]].append(tst_data["result"]["receive-rate"].
611                                           avg)
612                             elif table["include-tests"] == "PDR":
613                                 if tst_data["type"] == "PDR":
614                                     tbl_dict[tst_name_mod]["history"][
615                                         item["title"]].\
616                                         append(tst_data["throughput"]["value"])
617                                 elif tst_data["type"] == "NDRPDR":
618                                     tbl_dict[tst_name_mod]["history"][item[
619                                         "title"]].append(tst_data["throughput"][
620                                         "PDR"]["LOWER"])
621                             elif table["include-tests"] == "NDR":
622                                 if tst_data["type"] == "NDR":
623                                     tbl_dict[tst_name_mod]["history"][
624                                         item["title"]].\
625                                         append(tst_data["throughput"]["value"])
626                                 elif tst_data["type"] == "NDRPDR":
627                                     tbl_dict[tst_name_mod]["history"][item[
628                                         "title"]].append(tst_data["throughput"][
629                                         "NDR"]["LOWER"])
630                             else:
631                                 continue
632                         except (TypeError, KeyError):
633                             pass
634
635     tbl_lst = list()
636     for tst_name in tbl_dict.keys():
637         item = [tbl_dict[tst_name]["name"], ]
638         if history:
639             if tbl_dict[tst_name].get("history", None) is not None:
640                 for hist_data in tbl_dict[tst_name]["history"].values():
641                     if hist_data:
642                         item.append(round(mean(hist_data) / 1000000, 2))
643                         item.append(round(stdev(hist_data) / 1000000, 2))
644                     else:
645                         item.extend([None, None])
646             else:
647                 item.extend([None, None])
648         data_t = tbl_dict[tst_name]["ref-data"]
649         if data_t:
650             item.append(round(mean(data_t) / 1000000, 2))
651             item.append(round(stdev(data_t) / 1000000, 2))
652         else:
653             item.extend([None, None])
654         data_t = tbl_dict[tst_name]["cmp-data"]
655         if data_t:
656             item.append(round(mean(data_t) / 1000000, 2))
657             item.append(round(stdev(data_t) / 1000000, 2))
658         else:
659             item.extend([None, None])
660         if "dot1q" in tbl_dict[tst_name]["name"]:
661             item.append("Changed methodology")
662         elif item[-4] is not None and item[-2] is not None and item[-4] != 0:
663             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
664         else:
665             item.append("n/a")
666         if len(item) == len(header):
667             tbl_lst.append(item)
668
669     # Sort the table according to the relative change
670     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
671
672     # Generate csv tables:
673     csv_file = "{0}.csv".format(table["output-file"])
674     with open(csv_file, "w") as file_handler:
675         file_handler.write(header_str)
676         for test in tbl_lst:
677             file_handler.write(",".join([str(item) for item in test]) + "\n")
678
679     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
680
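# In this per-NIC variant the NIC token is also removed from the normalized
# key, so runs measured on different NICs collapse into one comparison row
# (hypothetical name):
#
#     >>> re.sub(REGEX_NIC, "", "10ge2p1x520-64b-2c-ethip4-ip4base")
#     '-64b-2c-ethip4-ip4base'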
681
682 def table_nics_comparison(table, input_data):
683     """Generate the table(s) with algorithm: table_nics_comparison
684     specified in the specification file.
685
686     :param table: Table to generate.
687     :param input_data: Data to process.
688     :type table: pandas.Series
689     :type input_data: InputData
690     """
691
692     logging.info("  Generating the table {0} ...".
693                  format(table.get("title", "")))
694
695     # Transform the data
696     logging.info("    Creating the data set for the {0} '{1}'.".
697                  format(table.get("type", ""), table.get("title", "")))
698     data = input_data.filter_data(table, continue_on_error=True)
699
700     # Prepare the header of the tables
701     try:
702         header = ["Test case", ]
703
704         if table["include-tests"] == "MRR":
705             hdr_param = "Receive Rate"
706         else:
707             hdr_param = "Throughput"
708
709         header.extend(
710             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
711              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
712              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
713              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
714              "Delta [%]"])
715         header_str = ",".join(header) + "\n"
716     except (AttributeError, KeyError) as err:
717         logging.error("The model is invalid, missing parameter: {0}".
718                       format(err))
719         return
720
721     # Prepare data for the table:
722     tbl_dict = dict()
723     for job, builds in table["data"].items():
724         for build in builds:
725             for tst_name, tst_data in data[job][str(build)].iteritems():
726                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
727                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
728                     replace("-ndrdisc", "").replace("-pdr", "").\
729                     replace("-ndr", "").\
730                     replace("1t1c", "1c").replace("2t1c", "1c").\
731                     replace("2t2c", "2c").replace("4t2c", "2c").\
732                     replace("4t4c", "4c").replace("8t4c", "4c")
733                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
734                 if tbl_dict.get(tst_name_mod, None) is None:
735                     name = "-".join(tst_data["name"].split("-")[:-1])
736                     tbl_dict[tst_name_mod] = {"name": name,
737                                               "ref-data": list(),
738                                               "cmp-data": list()}
739                 try:
740                     if table["include-tests"] == "MRR":
741                         result = tst_data["result"]["receive-rate"].avg
742                     elif table["include-tests"] == "PDR":
743                         result = tst_data["throughput"]["PDR"]["LOWER"]
744                     elif table["include-tests"] == "NDR":
745                         result = tst_data["throughput"]["NDR"]["LOWER"]
746                     else:
747                         result = None
748
749                     if result:
750                         if table["reference"]["nic"] in tst_data["tags"]:
751                             tbl_dict[tst_name_mod]["ref-data"].append(result)
752                         elif table["compare"]["nic"] in tst_data["tags"]:
753                             tbl_dict[tst_name_mod]["cmp-data"].append(result)
754                 except (TypeError, KeyError) as err:
755                     logging.debug("No data for {0}".format(tst_name))
756                     logging.debug(repr(err))
757                     # No data in output.xml for this test
758
759     tbl_lst = list()
760     for tst_name in tbl_dict.keys():
761         item = [tbl_dict[tst_name]["name"], ]
762         data_t = tbl_dict[tst_name]["ref-data"]
763         if data_t:
764             item.append(round(mean(data_t) / 1000000, 2))
765             item.append(round(stdev(data_t) / 1000000, 2))
766         else:
767             item.extend([None, None])
768         data_t = tbl_dict[tst_name]["cmp-data"]
769         if data_t:
770             item.append(round(mean(data_t) / 1000000, 2))
771             item.append(round(stdev(data_t) / 1000000, 2))
772         else:
773             item.extend([None, None])
774         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
775             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
776         if len(item) == len(header):
777             tbl_lst.append(item)
778
779     # Sort the table according to the relative change
780     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
781
782     # Generate csv tables:
783     csv_file = "{0}.csv".format(table["output-file"])
784     with open(csv_file, "w") as file_handler:
785         file_handler.write(header_str)
786         for test in tbl_lst:
787             file_handler.write(",".join([str(item) for item in test]) + "\n")
788
789     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
790
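# Sketch of how a single result is routed above: the NIC tags named in the
# specification decide whether a sample counts as reference or compare data
# (tag values below are assumptions, not taken from a real spec file):
#
#     table["reference"]["nic"] = "NIC_Intel-X520-DA2"
#     table["compare"]["nic"]   = "NIC_Intel-XL710"
#     # a test tagged NIC_Intel-XL710 therefore appends its result to
#     # tbl_dict[tst_name_mod]["cmp-data"]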
791
792 def table_soak_vs_ndr(table, input_data):
793     """Generate the table(s) with algorithm: table_soak_vs_ndr
794     specified in the specification file.
795
796     :param table: Table to generate.
797     :param input_data: Data to process.
798     :type table: pandas.Series
799     :type input_data: InputData
800     """
801
802     logging.info("  Generating the table {0} ...".
803                  format(table.get("title", "")))
804
805     # Transform the data
806     logging.info("    Creating the data set for the {0} '{1}'.".
807                  format(table.get("type", ""), table.get("title", "")))
808     data = input_data.filter_data(table, continue_on_error=True)
809
810     # Prepare the header of the table
811     try:
812         header = [
813             "Test case",
814             "{0} Throughput [Mpps]".format(table["reference"]["title"]),
815             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
816             "{0} Throughput [Mpps]".format(table["compare"]["title"]),
817             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
818             "Delta [%]", "Stdev of delta [%]"]
819         header_str = ",".join(header) + "\n"
820     except (AttributeError, KeyError) as err:
821         logging.error("The model is invalid, missing parameter: {0}".
822                       format(err))
823         return
824
825     # Create a list of available SOAK test results:
826     tbl_dict = dict()
827     for job, builds in table["compare"]["data"].items():
828         for build in builds:
829             for tst_name, tst_data in data[job][str(build)].iteritems():
830                 if tst_data["type"] == "SOAK":
831                     tst_name_mod = tst_name.replace("-soak", "")
832                     if tbl_dict.get(tst_name_mod, None) is None:
833                         groups = re.search(REGEX_NIC, tst_data["parent"])
834                         nic = groups.group(0) if groups else ""
835                         name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
836                                                               split("-")[:-1]))
837                         tbl_dict[tst_name_mod] = {
838                             "name": name,
839                             "ref-data": list(),
840                             "cmp-data": list()
841                         }
842                     try:
843                         tbl_dict[tst_name_mod]["cmp-data"].append(
844                             tst_data["throughput"]["LOWER"])
845                     except (KeyError, TypeError):
846                         pass
847     tests_lst = tbl_dict.keys()
848
849     # Add corresponding NDR test results:
850     for job, builds in table["reference"]["data"].items():
851         for build in builds:
852             for tst_name, tst_data in data[job][str(build)].iteritems():
853                 tst_name_mod = tst_name.replace("-ndrpdr", "").\
854                     replace("-mrr", "")
855                 if tst_name_mod in tests_lst:
856                     try:
857                         if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
858                             if table["include-tests"] == "MRR":
859                                 result = tst_data["result"]["receive-rate"].avg
860                             elif table["include-tests"] == "PDR":
861                                 result = tst_data["throughput"]["PDR"]["LOWER"]
862                             elif table["include-tests"] == "NDR":
863                                 result = tst_data["throughput"]["NDR"]["LOWER"]
864                             else:
865                                 result = None
866                             if result is not None:
867                                 tbl_dict[tst_name_mod]["ref-data"].append(
868                                     result)
869                     except (KeyError, TypeError):
870                         continue
871
872     tbl_lst = list()
873     for tst_name in tbl_dict.keys():
874         item = [tbl_dict[tst_name]["name"], ]
875         data_r = tbl_dict[tst_name]["ref-data"]
876         if data_r:
877             data_r_mean = mean(data_r)
878             item.append(round(data_r_mean / 1000000, 2))
879             data_r_stdev = stdev(data_r)
880             item.append(round(data_r_stdev / 1000000, 2))
881         else:
882             data_r_mean = None
883             data_r_stdev = None
884             item.extend([None, None])
885         data_c = tbl_dict[tst_name]["cmp-data"]
886         if data_c:
887             data_c_mean = mean(data_c)
888             item.append(round(data_c_mean / 1000000, 2))
889             data_c_stdev = stdev(data_c)
890             item.append(round(data_c_stdev / 1000000, 2))
891         else:
892             data_c_mean = None
893             data_c_stdev = None
894             item.extend([None, None])
895         if data_r_mean and data_c_mean:
896             delta, d_stdev = relative_change_stdev(
897                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
898             item.append(round(delta, 2))
899             item.append(round(d_stdev, 2))
900             tbl_lst.append(item)
901
902     # Sort the table according to the relative change
903     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
904
905     # Generate csv tables:
906     csv_file = "{0}.csv".format(table["output-file"])
907     with open(csv_file, "w") as file_handler:
908         file_handler.write(header_str)
909         for test in tbl_lst:
910             file_handler.write(",".join([str(item) for item in test]) + "\n")
911
912     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
913
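# Worked example of one row produced by the loop above, assuming
# relative_change_stdev() returns the change in percent together with its
# standard deviation (all numbers invented):
#
#     NDR  (reference): mean 12.0e6 pps, stdev 0.2e6 pps  -> 12.0, 0.2 [Mpps]
#     SOAK (compare):   mean 10.8e6 pps, stdev 0.3e6 pps  -> 10.8, 0.3 [Mpps]
#     delta ~ (10.8 - 12.0) / 12.0 * 100 = -10.0 [%]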
914
915 def table_performance_trending_dashboard(table, input_data):
916     """Generate the table(s) with algorithm:
917     table_performance_trending_dashboard
918     specified in the specification file.
919
920     :param table: Table to generate.
921     :param input_data: Data to process.
922     :type table: pandas.Series
923     :type input_data: InputData
924     """
925
926     logging.info("  Generating the table {0} ...".
927                  format(table.get("title", "")))
928
929     # Transform the data
930     logging.info("    Creating the data set for the {0} '{1}'.".
931                  format(table.get("type", ""), table.get("title", "")))
932     data = input_data.filter_data(table, continue_on_error=True)
933
934     # Prepare the header of the tables
935     header = ["Test Case",
936               "Trend [Mpps]",
937               "Short-Term Change [%]",
938               "Long-Term Change [%]",
939               "Regressions [#]",
940               "Progressions [#]"
941               ]
942     header_str = ",".join(header) + "\n"
943
944     # Prepare data for the table:
945     tbl_dict = dict()
946     for job, builds in table["data"].items():
947         for build in builds:
948             for tst_name, tst_data in data[job][str(build)].iteritems():
949                 if tst_name.lower() in table.get("ignore-list", list()):
950                     continue
951                 if tbl_dict.get(tst_name, None) is None:
952                     groups = re.search(REGEX_NIC, tst_data["parent"])
953                     if not groups:
954                         continue
955                     nic = groups.group(0)
956                     tbl_dict[tst_name] = {
957                         "name": "{0}-{1}".format(nic, tst_data["name"]),
958                         "data": OrderedDict()}
959                 try:
960                     tbl_dict[tst_name]["data"][str(build)] = \
961                         tst_data["result"]["receive-rate"]
962                 except (TypeError, KeyError):
963                     pass  # No data in output.xml for this test
964
965     tbl_lst = list()
966     for tst_name in tbl_dict.keys():
967         data_t = tbl_dict[tst_name]["data"]
968         if len(data_t) < 2:
969             continue
970
971         classification_lst, avgs = classify_anomalies(data_t)
972
973         win_size = min(len(data_t), table["window"])
974         long_win_size = min(len(data_t), table["long-trend-window"])
975
976         try:
977             max_long_avg = max(
978                 [x for x in avgs[-long_win_size:-win_size]
979                  if not isnan(x)])
980         except ValueError:
981             max_long_avg = nan
982         last_avg = avgs[-1]
983         avg_week_ago = avgs[max(-win_size, -len(avgs))]
984
985         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
986             rel_change_last = nan
987         else:
988             rel_change_last = round(
989                 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
990
991         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
992             rel_change_long = nan
993         else:
994             rel_change_long = round(
995                 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
996
997         if classification_lst:
998             if isnan(rel_change_last) and isnan(rel_change_long):
999                 continue
1000             if (isnan(last_avg) or
1001                 isnan(rel_change_last) or
1002                 isnan(rel_change_long)):
1003                 continue
1004             tbl_lst.append(
1005                 [tbl_dict[tst_name]["name"],
1006                  round(last_avg / 1000000, 2),
1007                  rel_change_last,
1008                  rel_change_long,
1009                  classification_lst[-win_size:].count("regression"),
1010                  classification_lst[-win_size:].count("progression")])
1011
1012     tbl_lst.sort(key=lambda rel: rel[0])
1013
1014     tbl_sorted = list()
1015     for nrr in range(table["window"], -1, -1):
1016         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1017         for nrp in range(table["window"], -1, -1):
1018             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1019             tbl_out.sort(key=lambda rel: rel[2])
1020             tbl_sorted.extend(tbl_out)
1021
1022     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1023
1024     logging.info("    Writing file: '{0}'".format(file_name))
1025     with open(file_name, "w") as file_handler:
1026         file_handler.write(header_str)
1027         for test in tbl_sorted:
1028             file_handler.write(",".join([str(item) for item in test]) + '\n')
1029
1030     txt_file_name = "{0}.txt".format(table["output-file"])
1031     logging.info("    Writing file: '{0}'".format(txt_file_name))
1032     convert_csv_to_pretty_txt(file_name, txt_file_name)
1033
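# Small worked example of the two relative changes computed above
# (sample values invented; the units cancel out in the ratio):
#
#     last_avg     = 9.5e6
#     avg_week_ago = 10.0e6 -> rel_change_last = (9.5 - 10.0) / 10.0 * 100 = -5.0
#     max_long_avg = 10.5e6 -> rel_change_long = (9.5 - 10.5) / 10.5 * 100 ~ -9.52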
1034
1035 def _generate_url(base, testbed, test_name):
1036     """Generate URL to a trending plot from the name of the test case.
1037
1038     :param base: The base part of URL common to all test cases.
1039     :param testbed: The testbed used for testing.
1040     :param test_name: The name of the test case.
1041     :type base: str
1042     :type testbed: str
1043     :type test_name: str
1044     :returns: The URL to the plot with the trending data for the given test
1045         case.
1046     :rtype: str
1047     """
1048
1049     url = base
1050     file_name = ""
1051     anchor = ".html#"
1052     feature = ""
1053
1054     if "lbdpdk" in test_name or "lbvpp" in test_name:
1055         file_name = "link_bonding"
1056
1057     elif "114b" in test_name and "vhost" in test_name:
1058         file_name = "vts"
1059
1060     elif "testpmd" in test_name or "l3fwd" in test_name:
1061         file_name = "dpdk"
1062
1063     elif "memif" in test_name:
1064         file_name = "container_memif"
1065         feature = "-base"
1066
1067     elif "srv6" in test_name:
1068         file_name = "srv6"
1069
1070     elif "vhost" in test_name:
1071         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
1072             file_name = "vm_vhost_l2"
1073             if "114b" in test_name:
1074                 feature = ""
1075             elif "l2xcbase" in test_name and "x520" in test_name:
1076                 feature = "-base-l2xc"
1077             elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
1078                 feature = "-base-l2bd"
1079             else:
1080                 feature = "-base"
1081         elif "ip4base" in test_name:
1082             file_name = "vm_vhost_ip4"
1083             feature = "-base"
1084
1085     elif "ipsecbasetnlsw" in test_name:
1086         file_name = "ipsecsw"
1087         feature = "-base-scale"
1088
1089     elif "ipsec" in test_name:
1090         file_name = "ipsec"
1091         feature = "-base-scale"
1092         if "hw-" in test_name:
1093             file_name = "ipsechw"
1094         elif "sw-" in test_name:
1095             file_name = "ipsecsw"
1096
1097     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
1098         file_name = "ip4_tunnels"
1099         feature = "-base"
1100
1101     elif "ip4base" in test_name or "ip4scale" in test_name:
1102         file_name = "ip4"
1103         if "xl710" in test_name:
1104             feature = "-base-scale-features"
1105         elif "iacl" in test_name:
1106             feature = "-features-iacl"
1107         elif "oacl" in test_name:
1108             feature = "-features-oacl"
1109         elif "snat" in test_name or "cop" in test_name:
1110             feature = "-features"
1111         else:
1112             feature = "-base-scale"
1113
1114     elif "ip6base" in test_name or "ip6scale" in test_name:
1115         file_name = "ip6"
1116         feature = "-base-scale"
1117
1118     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
1119             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
1120             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
1121         file_name = "l2"
1122         if "macip" in test_name:
1123             feature = "-features-macip"
1124         elif "iacl" in test_name:
1125             feature = "-features-iacl"
1126         elif "oacl" in test_name:
1127             feature = "-features-oacl"
1128         else:
1129             feature = "-base-scale"
1130
1131     if "x520" in test_name:
1132         nic = "x520-"
1133     elif "x710" in test_name:
1134         nic = "x710-"
1135     elif "xl710" in test_name:
1136         nic = "xl710-"
1137     elif "xxv710" in test_name:
1138         nic = "xxv710-"
1139     elif "vic1227" in test_name:
1140         nic = "vic1227-"
1141     elif "vic1385" in test_name:
1142         nic = "vic1385-"
1143     else:
1144         nic = ""
1145     anchor += nic
1146
1147     if "64b" in test_name:
1148         framesize = "64b"
1149     elif "78b" in test_name:
1150         framesize = "78b"
1151     elif "imix" in test_name:
1152         framesize = "imix"
1153     elif "9000b" in test_name:
1154         framesize = "9000b"
1155     elif "1518b" in test_name:
1156         framesize = "1518b"
1157     elif "114b" in test_name:
1158         framesize = "114b"
1159     else:
1160         framesize = ""
1161     anchor += framesize + '-'
1162
1163     if "1t1c" in test_name:
1164         anchor += "1t1c"
1165     elif "2t2c" in test_name:
1166         anchor += "2t2c"
1167     elif "4t4c" in test_name:
1168         anchor += "4t4c"
1169     elif "2t1c" in test_name:
1170         anchor += "2t1c"
1171     elif "4t2c" in test_name:
1172         anchor += "4t2c"
1173     elif "8t4c" in test_name:
1174         anchor += "8t4c"
1175
1176     return url + file_name + '-' + testbed + '-' + nic + framesize + \
1177         feature.replace("-int", "").replace("-tnl", "") + anchor + feature
1178
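# Example of a generated link (test name and testbed are hypothetical; the
# base URL matches the one used by the HTML dashboard below):
#
#     >>> _generate_url("../trending/", "3n-hsw",
#     ...               "10ge2p1x520-64b-2t1c-ethip4-ip4base-mrr")
#     '../trending/ip4-3n-hsw-x520-64b-base-scale.html#x520-64b-2t1c-base-scale'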
1179
1180 def table_performance_trending_dashboard_html(table, input_data):
1181     """Generate the table(s) with algorithm:
1182     table_performance_trending_dashboard_html specified in the specification
1183     file.
1184
1185     :param table: Table to generate.
1186     :param input_data: Data to process.
1187     :type table: dict
1188     :type input_data: InputData
1189     """
1190
1191     testbed = table.get("testbed", None)
1192     if testbed is None:
1193         logging.error("The testbed is not defined for the table '{0}'.".
1194                       format(table.get("title", "")))
1195         return
1196
1197     logging.info("  Generating the table {0} ...".
1198                  format(table.get("title", "")))
1199
1200     try:
1201         with open(table["input-file"], 'rb') as csv_file:
1202             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1203             csv_lst = [item for item in csv_content]
1204     except KeyError:
1205         logging.warning("The input file is not defined.")
1206         return
1207     except csv.Error as err:
1208         logging.warning("Not possible to process the file '{0}'.\n{1}".
1209                         format(table["input-file"], err))
1210         return
1211
1212     # Table:
1213     dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
1214
1215     # Table header:
1216     tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
1217     for idx, item in enumerate(csv_lst[0]):
1218         alignment = "left" if idx == 0 else "center"
1219         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
1220         th.text = item
1221
1222     # Rows:
1223     colors = {"regression": ("#ffcccc", "#ff9999"),
1224               "progression": ("#c6ecc6", "#9fdf9f"),
1225               "normal": ("#e9f1fb", "#d4e4f7")}
1226     for r_idx, row in enumerate(csv_lst[1:]):
1227         if int(row[4]):
1228             color = "regression"
1229         elif int(row[5]):
1230             color = "progression"
1231         else:
1232             color = "normal"
1233         background = colors[color][r_idx % 2]
1234         tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
1235
1236         # Columns:
1237         for c_idx, item in enumerate(row):
1238             alignment = "left" if c_idx == 0 else "center"
1239             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
1240             # Name:
1241             if c_idx == 0:
1242                 url = _generate_url("../trending/", testbed, item)
1243                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
1244                 ref.text = item
1245             else:
1246                 td.text = item
1247     try:
1248         with open(table["output-file"], 'w') as html_file:
1249             logging.info("    Writing file: '{0}'".format(table["output-file"]))
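                 # Emit the table as an rST "raw html" directive; the tab keeps
                 # the generated markup indented inside the directive body.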
1250             html_file.write(".. raw:: html\n\n\t")
1251             html_file.write(ET.tostring(dashboard))
1252             html_file.write("\n\t<p><br><br></p>\n")
1253     except KeyError:
1254         logging.warning("The output file is not defined.")
1255         return
1256
1257
1258 def table_last_failed_tests(table, input_data):
1259     """Generate the table(s) with algorithm: table_last_failed_tests
1260     specified in the specification file.
1261
1262     :param table: Table to generate.
1263     :param input_data: Data to process.
1264     :type table: pandas.Series
1265     :type input_data: InputData
1266     """
1267
1268     logging.info("  Generating the table {0} ...".
1269                  format(table.get("title", "")))
1270
1271     # Transform the data
1272     logging.info("    Creating the data set for the {0} '{1}'.".
1273                  format(table.get("type", ""), table.get("title", "")))
1274     data = input_data.filter_data(table, continue_on_error=True)
1275
1276     if data is None or data.empty:
1277         logging.warning("    No data for the {0} '{1}'.".
1278                         format(table.get("type", ""), table.get("title", "")))
1279         return
1280
1281     tbl_list = list()
1282     for job, builds in table["data"].items():
1283         for build in builds:
1284             build = str(build)
1285             try:
1286                 version = input_data.metadata(job, build).get("version", "")
1287             except KeyError:
1288                 logging.error("Data for {job}: {build} is not present.".
1289                               format(job=job, build=build))
1290                 return
1291             tbl_list.append(build)
1292             tbl_list.append(version)
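                 # Followed by one line per failed test: "<nic>-<test name>".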
1293             for tst_name, tst_data in data[job][build].iteritems():
1294                 if tst_data["status"] != "FAIL":
1295                     continue
1296                 groups = re.search(REGEX_NIC, tst_data["parent"])
1297                 if not groups:
1298                     continue
1299                 nic = groups.group(0)
1300                 tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))
1301
1302     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1303     logging.info("    Writing file: '{0}'".format(file_name))
1304     with open(file_name, "w") as file_handler:
1305         for test in tbl_list:
1306             file_handler.write(test + '\n')
1307
1308
1309 def table_failed_tests(table, input_data):
1310     """Generate the table(s) with algorithm: table_failed_tests
1311     specified in the specification file.
1312
1313     :param table: Table to generate.
1314     :param input_data: Data to process.
1315     :type table: pandas.Series
1316     :type input_data: InputData
1317     """
1318
1319     logging.info("  Generating the table {0} ...".
1320                  format(table.get("title", "")))
1321
1322     # Transform the data
1323     logging.info("    Creating the data set for the {0} '{1}'.".
1324                  format(table.get("type", ""), table.get("title", "")))
1325     data = input_data.filter_data(table, continue_on_error=True)
1326
1327     # Prepare the header of the tables
1328     header = ["Test Case",
1329               "Failures [#]",
1330               "Last Failure [Time]",
1331               "Last Failure [VPP-Build-Id]",
1332               "Last Failure [CSIT-Job-Build-Id]"]
1333
1334     # Generate the data for the table according to the model in the table
1335     # specification
1336
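         # Only builds generated within the last "window" days (default: 7)
         # are taken into account.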
1337     now = dt.utcnow()
1338     timeperiod = timedelta(int(table.get("window", 7)))
1339
1340     tbl_dict = dict()
1341     for job, builds in table["data"].items():
1342         for build in builds:
1343             build = str(build)
1344             for tst_name, tst_data in data[job][build].iteritems():
1345                 if tst_name.lower() in table.get("ignore-list", list()):
1346                     continue
1347                 if tbl_dict.get(tst_name, None) is None:
1348                     groups = re.search(REGEX_NIC, tst_data["parent"])
1349                     if not groups:
1350                         continue
1351                     nic = groups.group(0)
1352                     tbl_dict[tst_name] = {
1353                         "name": "{0}-{1}".format(nic, tst_data["name"]),
1354                         "data": OrderedDict()}
1355                 try:
1356                     generated = input_data.metadata(job, build).\
1357                         get("generated", "")
1358                     if not generated:
1359                         continue
1360                     then = dt.strptime(generated, "%Y%m%d %H:%M")
1361                     if (now - then) <= timeperiod:
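                             # Store (status, generated timestamp, VPP version,
                             # CSIT build) for every build inside the window.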
1362                         tbl_dict[tst_name]["data"][build] = (
1363                             tst_data["status"],
1364                             generated,
1365                             input_data.metadata(job, build).get("version", ""),
1366                             build)
1367                 except (TypeError, KeyError) as err:
1368                     logging.warning("tst_name: {} - err: {}".
1369                                     format(tst_name, repr(err)))
1370
1371     max_fails = 0
1372     tbl_lst = list()
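         # Count failures per test; the last failing build seen in insertion
         # order supplies the "Last Failure" date, VPP and CSIT build columns.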
1373     for tst_data in tbl_dict.values():
1374         fails_nr = 0
1375         for val in tst_data["data"].values():
1376             if val[0] == "FAIL":
1377                 fails_nr += 1
1378                 fails_last_date = val[1]
1379                 fails_last_vpp = val[2]
1380                 fails_last_csit = val[3]
1381         if fails_nr:
1382             max_fails = max(max_fails, fails_nr)
1383             tbl_lst.append([tst_data["name"],
1384                             fails_nr,
1385                             fails_last_date,
1386                             fails_last_vpp,
1387                             "mrr-daily-build-{0}".format(fails_last_csit)])
1388
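         # Order rows by the number of failures (descending); within the same
         # failure count, the most recently failing tests come first.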
1389     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1390     tbl_sorted = list()
1391     for nrf in range(max_fails, -1, -1):
1392         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1393         tbl_sorted.extend(tbl_fails)
1394     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1395
1396     logging.info("    Writing file: '{0}'".format(file_name))
1397     with open(file_name, "w") as file_handler:
1398         file_handler.write(",".join(header) + "\n")
1399         for test in tbl_sorted:
1400             file_handler.write(",".join([str(item) for item in test]) + '\n')
1401
1402     txt_file_name = "{0}.txt".format(table["output-file"])
1403     logging.info("    Writing file: '{0}'".format(txt_file_name))
1404     convert_csv_to_pretty_txt(file_name, txt_file_name)
1405
1406
1407 def table_failed_tests_html(table, input_data):
1408     """Generate the table(s) with algorithm: table_failed_tests_html
1409     specified in the specification file.
1410
1411     :param table: Table to generate.
1412     :param input_data: Data to process.
1413     :type table: pandas.Series
1414     :type input_data: InputData
1415     """
1416
1417     testbed = table.get("testbed", None)
1418     if testbed is None:
1419         logging.error("The testbed is not defined for the table '{0}'.".
1420                       format(table.get("title", "")))
1421         return
1422
1423     logging.info("  Generating the table {0} ...".
1424                  format(table.get("title", "")))
1425
1426     try:
1427         with open(table["input-file"], 'rb') as csv_file:
1428             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1429             csv_lst = [item for item in csv_content]
1430     except KeyError:
1431         logging.warning("The input file is not defined.")
1432         return
1433     except csv.Error as err:
1434         logging.warning("Not possible to process the file '{0}'.\n{1}".
1435                         format(table["input-file"], err))
1436         return
1437
1438     # Table:
1439     failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
1440
1441     # Table header:
1442     tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
1443     for idx, item in enumerate(csv_lst[0]):
1444         alignment = "left" if idx == 0 else "center"
1445         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
1446         th.text = item
1447
1448     # Rows:
1449     colors = ("#e9f1fb", "#d4e4f7")
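         # Alternate the two shades between rows; the first column links each
         # test to its trending graph via _generate_url().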
1450     for r_idx, row in enumerate(csv_lst[1:]):
1451         background = colors[r_idx % 2]
1452         tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
1453
1454         # Columns:
1455         for c_idx, item in enumerate(row):
1456             alignment = "left" if c_idx == 0 else "center"
1457             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
1458             # Name:
1459             if c_idx == 0:
1460                 url = _generate_url("../trending/", testbed, item)
1461                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
1462                 ref.text = item
1463             else:
1464                 td.text = item
1465     try:
1466         with open(table["output-file"], 'w') as html_file:
1467             logging.info("    Writing file: '{0}'".format(table["output-file"]))
1468             html_file.write(".. raw:: html\n\n\t")
1469             html_file.write(ET.tostring(failed_tests))
1470             html_file.write("\n\t<p><br><br></p>\n")
1471     except KeyError:
1472         logging.warning("The output file is not defined.")
1473         return