CSIT-1590: Performance comparison analysis
[csit.git] / resources/tools/presentation/generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
28
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30     convert_csv_to_pretty_txt, relative_change_stdev
31
32
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
34
35
36 def generate_tables(spec, data):
37     """Generate all tables specified in the specification file.
38
39     :param spec: Specification read from the specification file.
40     :param data: Data to process.
41     :type spec: Specification
42     :type data: InputData
43     """
44
45     logging.info("Generating the tables ...")
46     for table in spec.tables:
47         try:
48             eval(table["algorithm"])(table, data)
49         except NameError as err:
50             logging.error("Algorithm '{alg}' is probably not defined: {err}".
51                           format(alg=table["algorithm"], err=repr(err)))
52     logging.info("Done.")
53
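# A minimal, hypothetical sketch of the kind of "table" entry that
# generate_tables() dispatches on via eval(table["algorithm"]).  The key
# names mirror how table_details() below reads them; the concrete values
# (job name, build number, columns) are assumptions, not taken from any
# real specification file.
_EXAMPLE_TABLE_SPEC = {
    "algorithm": "table_details",        # must name a function in this module
    "title": "Detailed Test Results",
    "type": "table",
    "output-file": "detailed_test_results",
    "output-file-ext": ".csv",
    "columns": [
        {"title": "Name", "data": "data name"},      # "data <field>" -> field
        {"title": "Status", "data": "data status"},
    ],
    "data": {"example-job-name": [1]},   # job name -> list of build numbers
}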
54
55 def table_details(table, input_data):
56     """Generate the table(s) with algorithm: table_details
57     specified in the specification file.
58
59     :param table: Table to generate.
60     :param input_data: Data to process.
61     :type table: pandas.Series
62     :type input_data: InputData
63     """
64
65     logging.info("  Generating the table {0} ...".
66                  format(table.get("title", "")))
67
68     # Transform the data
69     logging.info("    Creating the data set for the {0} '{1}'.".
70                  format(table.get("type", ""), table.get("title", "")))
71     data = input_data.filter_data(table)
72
73     # Prepare the header of the tables
74     header = list()
75     for column in table["columns"]:
76         header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
77
78     # Generate the data for the table according to the model in the table
79     # specification
80     job = table["data"].keys()[0]
81     build = str(table["data"][job][0])
82     try:
83         suites = input_data.suites(job, build)
84     except KeyError:
85         logging.error("    No data available. The table will not be generated.")
86         return
87
88     for suite_longname, suite in suites.iteritems():
89         # Generate data
90         suite_name = suite["name"]
91         table_lst = list()
92         for test in data[job][build].keys():
93             if data[job][build][test]["parent"] in suite_name:
94                 row_lst = list()
95                 for column in table["columns"]:
96                     try:
97                         col_data = str(data[job][build][test][column["data"].
98                                        split(" ")[1]]).replace('"', '""')
99                         if column["data"].split(" ")[1] in ("conf-history",
100                                                             "show-run"):
101                             col_data = replace(col_data, " |br| ", "",
102                                                maxreplace=1)
103                             col_data = " |prein| {0} |preout| ".\
104                                 format(col_data[:-5])
105                         row_lst.append('"{0}"'.format(col_data))
106                     except KeyError:
107                         row_lst.append("No data")
108                 table_lst.append(row_lst)
109
110         # Write the data to file
111         if table_lst:
112             file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
113                                             table["output-file-ext"])
114             logging.info("      Writing file: '{}'".format(file_name))
115             with open(file_name, "w") as file_handler:
116                 file_handler.write(",".join(header) + "\n")
117                 for item in table_lst:
118                     file_handler.write(",".join(item) + "\n")
119
120     logging.info("  Done.")
121
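# Illustrative helper (not used by this module) showing the cell
# transformation applied to "conf-history" / "show-run" columns in
# table_details() above and table_merged_details() below: drop the first
# " |br| " separator, wrap the text in |prein| / |preout| markers and strip
# the last five characters, which presumably removes a trailing " |br|".
def _example_preformat_cell(col_data):
    """Return col_data wrapped in |prein| ... |preout| markers."""
    col_data = col_data.replace(" |br| ", "", 1)
    return " |prein| {0} |preout| ".format(col_data[:-5])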
122
123 def table_merged_details(table, input_data):
124     """Generate the table(s) with algorithm: table_merged_details
125     specified in the specification file.
126
127     :param table: Table to generate.
128     :param input_data: Data to process.
129     :type table: pandas.Series
130     :type input_data: InputData
131     """
132
133     logging.info("  Generating the table {0} ...".
134                  format(table.get("title", "")))
135
136     # Transform the data
137     logging.info("    Creating the data set for the {0} '{1}'.".
138                  format(table.get("type", ""), table.get("title", "")))
139     data = input_data.filter_data(table)
140     data = input_data.merge_data(data)
141     data.sort_index(inplace=True)
142
143     logging.info("    Creating the data set for the {0} '{1}'.".
144                  format(table.get("type", ""), table.get("title", "")))
145     suites = input_data.filter_data(table, data_set="suites")
146     suites = input_data.merge_data(suites)
147
148     # Prepare the header of the tables
149     header = list()
150     for column in table["columns"]:
151         header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
152
153     for _, suite in suites.iteritems():
154         # Generate data
155         suite_name = suite["name"]
156         table_lst = list()
157         for test in data.keys():
158             if data[test]["parent"] in suite_name:
159                 row_lst = list()
160                 for column in table["columns"]:
161                     try:
162                         col_data = str(data[test][column["data"].
163                                        split(" ")[1]]).replace('"', '""')
164                         col_data = replace(col_data, "No Data",
165                                            "Not Captured     ")
166                         if column["data"].split(" ")[1] in ("conf-history",
167                                                             "show-run"):
168                             col_data = replace(col_data, " |br| ", "",
169                                                maxreplace=1)
170                             col_data = " |prein| {0} |preout| ".\
171                                 format(col_data[:-5])
172                         row_lst.append('"{0}"'.format(col_data))
173                     except KeyError:
174                         row_lst.append('"Not captured"')
175                 table_lst.append(row_lst)
176
177         # Write the data to file
178         if table_lst:
179             file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
180                                             table["output-file-ext"])
181             logging.info("      Writing file: '{}'".format(file_name))
182             with open(file_name, "w") as file_handler:
183                 file_handler.write(",".join(header) + "\n")
184                 for item in table_lst:
185                     file_handler.write(",".join(item) + "\n")
186
187     logging.info("  Done.")
188
189
190 def table_performance_comparison(table, input_data):
191     """Generate the table(s) with algorithm: table_performance_comparison
192     specified in the specification file.
193
194     :param table: Table to generate.
195     :param input_data: Data to process.
196     :type table: pandas.Series
197     :type input_data: InputData
198     """
199
200     logging.info("  Generating the table {0} ...".
201                  format(table.get("title", "")))
202
203     # Transform the data
204     logging.info("    Creating the data set for the {0} '{1}'.".
205                  format(table.get("type", ""), table.get("title", "")))
206     data = input_data.filter_data(table, continue_on_error=True)
207
208     # Prepare the header of the tables
209     try:
210         header = ["Test case", ]
211
212         if table["include-tests"] == "MRR":
213             hdr_param = "Receive Rate"
214         else:
215             hdr_param = "Throughput"
216
217         history = table.get("history", None)
218         if history:
219             for item in history:
220                 header.extend(
221                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
222                      "{0} Stdev [Mpps]".format(item["title"])])
223         header.extend(
224             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
225              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
226              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
227              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
228              "Delta [%]"])
229         header_str = ",".join(header) + "\n"
230     except (AttributeError, KeyError) as err:
231         logging.error("The model is invalid, missing parameter: {0}".
232                       format(err))
233         return
234
235     # Prepare data for the table:
236     tbl_dict = dict()
237     for job, builds in table["reference"]["data"].items():
238         for build in builds:
239             for tst_name, tst_data in data[job][str(build)].iteritems():
240                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
241                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
242                     replace("-ndrdisc", "").replace("-pdr", "").\
243                     replace("-ndr", "").\
244                     replace("1t1c", "1c").replace("2t1c", "1c").\
245                     replace("2t2c", "2c").replace("4t2c", "2c").\
246                     replace("4t4c", "4c").replace("8t4c", "4c")
247                 if "across topologies" in table["title"].lower():
248                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
249                 if tbl_dict.get(tst_name_mod, None) is None:
250                     groups = re.search(REGEX_NIC, tst_data["parent"])
251                     nic = groups.group(0) if groups else ""
252                     name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
253                                                           split("-")[:-1]))
254                     if "across testbeds" in table["title"].lower() or \
255                             "across topologies" in table["title"].lower():
256                         name = name.\
257                             replace("1t1c", "1c").replace("2t1c", "1c").\
258                             replace("2t2c", "2c").replace("4t2c", "2c").\
259                             replace("4t4c", "4c").replace("8t4c", "4c")
260                     tbl_dict[tst_name_mod] = {"name": name,
261                                               "ref-data": list(),
262                                               "cmp-data": list()}
263                 try:
264                     # TODO: Re-work when NDRPDRDISC tests are not used
265                     if table["include-tests"] == "MRR":
266                         tbl_dict[tst_name_mod]["ref-data"]. \
267                             append(tst_data["result"]["receive-rate"].avg)
268                     elif table["include-tests"] == "PDR":
269                         if tst_data["type"] == "PDR":
270                             tbl_dict[tst_name_mod]["ref-data"]. \
271                                 append(tst_data["throughput"]["value"])
272                         elif tst_data["type"] == "NDRPDR":
273                             tbl_dict[tst_name_mod]["ref-data"].append(
274                                 tst_data["throughput"]["PDR"]["LOWER"])
275                     elif table["include-tests"] == "NDR":
276                         if tst_data["type"] == "NDR":
277                             tbl_dict[tst_name_mod]["ref-data"]. \
278                                 append(tst_data["throughput"]["value"])
279                         elif tst_data["type"] == "NDRPDR":
280                             tbl_dict[tst_name_mod]["ref-data"].append(
281                                 tst_data["throughput"]["NDR"]["LOWER"])
282                     else:
283                         continue
284                 except TypeError:
285                     pass  # No data in output.xml for this test
286
287     for job, builds in table["compare"]["data"].items():
288         for build in builds:
289             for tst_name, tst_data in data[job][str(build)].iteritems():
290                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
291                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
292                     replace("-ndrdisc", "").replace("-pdr", ""). \
293                     replace("-ndr", "").\
294                     replace("1t1c", "1c").replace("2t1c", "1c").\
295                     replace("2t2c", "2c").replace("4t2c", "2c").\
296                     replace("4t4c", "4c").replace("8t4c", "4c")
297                 if "across topologies" in table["title"].lower():
298                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
299                 if tbl_dict.get(tst_name_mod, None) is None:
300                     groups = re.search(REGEX_NIC, tst_data["parent"])
301                     nic = groups.group(0) if groups else ""
302                     name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
303                                                           split("-")[:-1]))
304                     if "across testbeds" in table["title"].lower() or \
305                             "across topologies" in table["title"].lower():
306                         name = name.\
307                             replace("1t1c", "1c").replace("2t1c", "1c").\
308                             replace("2t2c", "2c").replace("4t2c", "2c").\
309                             replace("4t4c", "4c").replace("8t4c", "4c")
310                     tbl_dict[tst_name_mod] = {"name": name,
311                                               "ref-data": list(),
312                                               "cmp-data": list()}
313                 try:
314                     # TODO: Re-work when NDRPDRDISC tests are not used
315                     if table["include-tests"] == "MRR":
316                         tbl_dict[tst_name_mod]["cmp-data"]. \
317                             append(tst_data["result"]["receive-rate"].avg)
318                     elif table["include-tests"] == "PDR":
319                         if tst_data["type"] == "PDR":
320                             tbl_dict[tst_name_mod]["cmp-data"]. \
321                                 append(tst_data["throughput"]["value"])
322                         elif tst_data["type"] == "NDRPDR":
323                             tbl_dict[tst_name_mod]["cmp-data"].append(
324                                 tst_data["throughput"]["PDR"]["LOWER"])
325                     elif table["include-tests"] == "NDR":
326                         if tst_data["type"] == "NDR":
327                             tbl_dict[tst_name_mod]["cmp-data"]. \
328                                 append(tst_data["throughput"]["value"])
329                         elif tst_data["type"] == "NDRPDR":
330                             tbl_dict[tst_name_mod]["cmp-data"].append(
331                                 tst_data["throughput"]["NDR"]["LOWER"])
332                     else:
333                         continue
334                 except (KeyError, TypeError):
335                     pass
336     if history:
337         for item in history:
338             for job, builds in item["data"].items():
339                 for build in builds:
340                     for tst_name, tst_data in data[job][str(build)].iteritems():
341                         tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
342                             replace("-ndrpdr", "").replace("-pdrdisc", ""). \
343                             replace("-ndrdisc", "").replace("-pdr", ""). \
344                             replace("-ndr", "").\
345                             replace("1t1c", "1c").replace("2t1c", "1c").\
346                             replace("2t2c", "2c").replace("4t2c", "2c").\
347                             replace("4t4c", "4c").replace("8t4c", "4c")
348                         if "across topologies" in table["title"].lower():
349                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
350                         if tbl_dict.get(tst_name_mod, None) is None:
351                             continue
352                         if tbl_dict[tst_name_mod].get("history", None) is None:
353                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
354                         if tbl_dict[tst_name_mod]["history"].get(item["title"],
355                                                              None) is None:
356                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
357                                 list()
358                         try:
359                             # TODO: Re-work when NDRPDRDISC tests are not used
360                             if table["include-tests"] == "MRR":
361                                 tbl_dict[tst_name_mod]["history"][item["title"
362                                 ]].append(tst_data["result"]["receive-rate"].
363                                           avg)
364                             elif table["include-tests"] == "PDR":
365                                 if tst_data["type"] == "PDR":
366                                     tbl_dict[tst_name_mod]["history"][
367                                         item["title"]].\
368                                         append(tst_data["throughput"]["value"])
369                                 elif tst_data["type"] == "NDRPDR":
370                                     tbl_dict[tst_name_mod]["history"][item[
371                                         "title"]].append(tst_data["throughput"][
372                                         "PDR"]["LOWER"])
373                             elif table["include-tests"] == "NDR":
374                                 if tst_data["type"] == "NDR":
375                                     tbl_dict[tst_name_mod]["history"][
376                                         item["title"]].\
377                                         append(tst_data["throughput"]["value"])
378                                 elif tst_data["type"] == "NDRPDR":
379                                     tbl_dict[tst_name_mod]["history"][item[
380                                         "title"]].append(tst_data["throughput"][
381                                         "NDR"]["LOWER"])
382                             else:
383                                 continue
384                         except (TypeError, KeyError):
385                             pass
386
387     tbl_lst = list()
388     for tst_name in tbl_dict.keys():
389         item = [tbl_dict[tst_name]["name"], ]
390         if history:
391             if tbl_dict[tst_name].get("history", None) is not None:
392                 for hist_data in tbl_dict[tst_name]["history"].values():
393                     if hist_data:
394                         item.append(round(mean(hist_data) / 1000000, 2))
395                         item.append(round(stdev(hist_data) / 1000000, 2))
396                     else:
397                         item.extend([None, None])
398             else:
399                 item.extend([None, None])
400         data_t = tbl_dict[tst_name]["ref-data"]
401         if data_t:
402             item.append(round(mean(data_t) / 1000000, 2))
403             item.append(round(stdev(data_t) / 1000000, 2))
404         else:
405             item.extend([None, None])
406         data_t = tbl_dict[tst_name]["cmp-data"]
407         if data_t:
408             item.append(round(mean(data_t) / 1000000, 2))
409             item.append(round(stdev(data_t) / 1000000, 2))
410         else:
411             item.extend([None, None])
412         if "dot1q" in tbl_dict[tst_name]["name"]:
413             item.append("Changed methodology")
414         elif item[-4] is not None and item[-2] is not None and item[-4] != 0:
415             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
416         else:
417             item.append("n/a")
418         if (len(item) == len(header)) and (item[-5] is not None):
419             tbl_lst.append(item)
420
421     # Sort the table according to the relative change
422     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
423
424     # Generate csv tables:
425     csv_file = "{0}.csv".format(table["output-file"])
426     with open(csv_file, "w") as file_handler:
427         file_handler.write(header_str)
428         for test in tbl_lst:
429             file_handler.write(",".join([str(item) for item in test]) + "\n")
430
431     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
432
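# Illustrative helper (not used by this module) making explicit the test-name
# normalisation repeated inline in the comparison functions above and below:
# strip the test-type suffix and collapse thread/core tags to core counts.
# The replace order matches the inline chains.
def _example_normalize_test_name(tst_name):
    """Return tst_name without its test-type suffix, with core tags only."""
    for suffix in ("-ndrpdrdisc", "-ndrpdr", "-pdrdisc", "-ndrdisc",
                   "-pdr", "-ndr"):
        tst_name = tst_name.replace(suffix, "")
    for threads, cores in (("1t1c", "1c"), ("2t1c", "1c"), ("2t2c", "2c"),
                           ("4t2c", "2c"), ("4t4c", "4c"), ("8t4c", "4c")):
        tst_name = tst_name.replace(threads, cores)
    return tst_name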
433
434 def table_performance_comparison_nic(table, input_data):
435     """Generate the table(s) with algorithm: table_performance_comparison_nic
436     specified in the specification file.
437
438     :param table: Table to generate.
439     :param input_data: Data to process.
440     :type table: pandas.Series
441     :type input_data: InputData
442     """
443
444     logging.info("  Generating the table {0} ...".
445                  format(table.get("title", "")))
446
447     # Transform the data
448     logging.info("    Creating the data set for the {0} '{1}'.".
449                  format(table.get("type", ""), table.get("title", "")))
450     data = input_data.filter_data(table, continue_on_error=True)
451
452     # Prepare the header of the tables
453     try:
454         header = ["Test case", ]
455
456         if table["include-tests"] == "MRR":
457             hdr_param = "Receive Rate"
458         else:
459             hdr_param = "Throughput"
460
461         history = table.get("history", None)
462         if history:
463             for item in history:
464                 header.extend(
465                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
466                      "{0} Stdev [Mpps]".format(item["title"])])
467         header.extend(
468             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
469              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
470              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
471              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
472              "Delta [%]"])
473         header_str = ",".join(header) + "\n"
474     except (AttributeError, KeyError) as err:
475         logging.error("The model is invalid, missing parameter: {0}".
476                       format(err))
477         return
478
479     # Prepare data for the table:
480     tbl_dict = dict()
481     for job, builds in table["reference"]["data"].items():
482         for build in builds:
483             for tst_name, tst_data in data[job][str(build)].iteritems():
484                 if table["reference"]["nic"] not in tst_data["tags"]:
485                     continue
486                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
487                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
488                     replace("-ndrdisc", "").replace("-pdr", "").\
489                     replace("-ndr", "").\
490                     replace("1t1c", "1c").replace("2t1c", "1c").\
491                     replace("2t2c", "2c").replace("4t2c", "2c").\
492                     replace("4t4c", "4c").replace("8t4c", "4c")
493                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
494                 if "across topologies" in table["title"].lower():
495                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
496                 if tbl_dict.get(tst_name_mod, None) is None:
497                     name = "{0}".format("-".join(tst_data["name"].
498                                                  split("-")[:-1]))
499                     if "across testbeds" in table["title"].lower() or \
500                             "across topologies" in table["title"].lower():
501                         name = name.\
502                             replace("1t1c", "1c").replace("2t1c", "1c").\
503                             replace("2t2c", "2c").replace("4t2c", "2c").\
504                             replace("4t4c", "4c").replace("8t4c", "4c")
505                     tbl_dict[tst_name_mod] = {"name": name,
506                                               "ref-data": list(),
507                                               "cmp-data": list()}
508                 try:
509                     # TODO: Re-work when NDRPDRDISC tests are not used
510                     if table["include-tests"] == "MRR":
511                         tbl_dict[tst_name_mod]["ref-data"]. \
512                             append(tst_data["result"]["receive-rate"].avg)
513                     elif table["include-tests"] == "PDR":
514                         if tst_data["type"] == "PDR":
515                             tbl_dict[tst_name_mod]["ref-data"]. \
516                                 append(tst_data["throughput"]["value"])
517                         elif tst_data["type"] == "NDRPDR":
518                             tbl_dict[tst_name_mod]["ref-data"].append(
519                                 tst_data["throughput"]["PDR"]["LOWER"])
520                     elif table["include-tests"] == "NDR":
521                         if tst_data["type"] == "NDR":
522                             tbl_dict[tst_name_mod]["ref-data"]. \
523                                 append(tst_data["throughput"]["value"])
524                         elif tst_data["type"] == "NDRPDR":
525                             tbl_dict[tst_name_mod]["ref-data"].append(
526                                 tst_data["throughput"]["NDR"]["LOWER"])
527                     else:
528                         continue
529                 except TypeError:
530                     pass  # No data in output.xml for this test
531
532     for job, builds in table["compare"]["data"].items():
533         for build in builds:
534             for tst_name, tst_data in data[job][str(build)].iteritems():
535                 if table["compare"]["nic"] not in tst_data["tags"]:
536                     continue
537                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
538                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
539                     replace("-ndrdisc", "").replace("-pdr", ""). \
540                     replace("-ndr", "").\
541                     replace("1t1c", "1c").replace("2t1c", "1c").\
542                     replace("2t2c", "2c").replace("4t2c", "2c").\
543                     replace("4t4c", "4c").replace("8t4c", "4c")
544                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
545                 if "across topologies" in table["title"].lower():
546                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
547                 if tbl_dict.get(tst_name_mod, None) is None:
548                     name = "{0}".format("-".join(tst_data["name"].
549                                                  split("-")[:-1]))
550                     if "across testbeds" in table["title"].lower() or \
551                             "across topologies" in table["title"].lower():
552                         name = name.\
553                             replace("1t1c", "1c").replace("2t1c", "1c").\
554                             replace("2t2c", "2c").replace("4t2c", "2c").\
555                             replace("4t4c", "4c").replace("8t4c", "4c")
556                     tbl_dict[tst_name_mod] = {"name": name,
557                                               "ref-data": list(),
558                                               "cmp-data": list()}
559                 try:
560                     # TODO: Re-work when NDRPDRDISC tests are not used
561                     if table["include-tests"] == "MRR":
562                         tbl_dict[tst_name_mod]["cmp-data"]. \
563                             append(tst_data["result"]["receive-rate"].avg)
564                     elif table["include-tests"] == "PDR":
565                         if tst_data["type"] == "PDR":
566                             tbl_dict[tst_name_mod]["cmp-data"]. \
567                                 append(tst_data["throughput"]["value"])
568                         elif tst_data["type"] == "NDRPDR":
569                             tbl_dict[tst_name_mod]["cmp-data"].append(
570                                 tst_data["throughput"]["PDR"]["LOWER"])
571                     elif table["include-tests"] == "NDR":
572                         if tst_data["type"] == "NDR":
573                             tbl_dict[tst_name_mod]["cmp-data"]. \
574                                 append(tst_data["throughput"]["value"])
575                         elif tst_data["type"] == "NDRPDR":
576                             tbl_dict[tst_name_mod]["cmp-data"].append(
577                                 tst_data["throughput"]["NDR"]["LOWER"])
578                     else:
579                         continue
580                 except (KeyError, TypeError):
581                     pass
582
583     if history:
584         for item in history:
585             for job, builds in item["data"].items():
586                 for build in builds:
587                     for tst_name, tst_data in data[job][str(build)].iteritems():
588                         if item["nic"] not in tst_data["tags"]:
589                             continue
590                         tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
591                             replace("-ndrpdr", "").replace("-pdrdisc", ""). \
592                             replace("-ndrdisc", "").replace("-pdr", ""). \
593                             replace("-ndr", "").\
594                             replace("1t1c", "1c").replace("2t1c", "1c").\
595                             replace("2t2c", "2c").replace("4t2c", "2c").\
596                             replace("4t4c", "4c").replace("8t4c", "4c")
597                         tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
598                         if "across topologies" in table["title"].lower():
599                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
600                         if tbl_dict.get(tst_name_mod, None) is None:
601                             continue
602                         if tbl_dict[tst_name_mod].get("history", None) is None:
603                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
604                         if tbl_dict[tst_name_mod]["history"].get(item["title"],
605                                                              None) is None:
606                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
607                                 list()
608                         try:
609                             # TODO: Re-work when NDRPDRDISC tests are not used
610                             if table["include-tests"] == "MRR":
611                                 tbl_dict[tst_name_mod]["history"][item["title"
612                                 ]].append(tst_data["result"]["receive-rate"].
613                                           avg)
614                             elif table["include-tests"] == "PDR":
615                                 if tst_data["type"] == "PDR":
616                                     tbl_dict[tst_name_mod]["history"][
617                                         item["title"]].\
618                                         append(tst_data["throughput"]["value"])
619                                 elif tst_data["type"] == "NDRPDR":
620                                     tbl_dict[tst_name_mod]["history"][item[
621                                         "title"]].append(tst_data["throughput"][
622                                         "PDR"]["LOWER"])
623                             elif table["include-tests"] == "NDR":
624                                 if tst_data["type"] == "NDR":
625                                     tbl_dict[tst_name_mod]["history"][
626                                         item["title"]].\
627                                         append(tst_data["throughput"]["value"])
628                                 elif tst_data["type"] == "NDRPDR":
629                                     tbl_dict[tst_name_mod]["history"][item[
630                                         "title"]].append(tst_data["throughput"][
631                                         "NDR"]["LOWER"])
632                             else:
633                                 continue
634                         except (TypeError, KeyError):
635                             pass
636
637     tbl_lst = list()
638     for tst_name in tbl_dict.keys():
639         item = [tbl_dict[tst_name]["name"], ]
640         if history:
641             if tbl_dict[tst_name].get("history", None) is not None:
642                 for hist_data in tbl_dict[tst_name]["history"].values():
643                     if hist_data:
644                         item.append(round(mean(hist_data) / 1000000, 2))
645                         item.append(round(stdev(hist_data) / 1000000, 2))
646                     else:
647                         item.extend([None, None])
648             else:
649                 item.extend([None, None])
650         data_t = tbl_dict[tst_name]["ref-data"]
651         if data_t:
652             item.append(round(mean(data_t) / 1000000, 2))
653             item.append(round(stdev(data_t) / 1000000, 2))
654         else:
655             item.extend([None, None])
656         data_t = tbl_dict[tst_name]["cmp-data"]
657         if data_t:
658             item.append(round(mean(data_t) / 1000000, 2))
659             item.append(round(stdev(data_t) / 1000000, 2))
660         else:
661             item.extend([None, None])
662         if "dot1q" in tbl_dict[tst_name]["name"]:
663             item.append("Changed methodology")
664         elif item[-4] is not None and item[-2] is not None and item[-4] != 0:
665             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
666         else:
667             item.append("n/a")
668         if (len(item) == len(header)) and (item[-5] is not None):
669             tbl_lst.append(item)
670
671     # Sort the table according to the relative change
672     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
673
674     # Generate csv tables:
675     csv_file = "{0}.csv".format(table["output-file"])
676     with open(csv_file, "w") as file_handler:
677         file_handler.write(header_str)
678         for test in tbl_lst:
679             file_handler.write(",".join([str(item) for item in test]) + "\n")
680
681     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
682
683
684 def table_nics_comparison(table, input_data):
685     """Generate the table(s) with algorithm: table_nics_comparison
686     specified in the specification file.
687
688     :param table: Table to generate.
689     :param input_data: Data to process.
690     :type table: pandas.Series
691     :type input_data: InputData
692     """
693
694     logging.info("  Generating the table {0} ...".
695                  format(table.get("title", "")))
696
697     # Transform the data
698     logging.info("    Creating the data set for the {0} '{1}'.".
699                  format(table.get("type", ""), table.get("title", "")))
700     data = input_data.filter_data(table, continue_on_error=True)
701
702     # Prepare the header of the tables
703     try:
704         header = ["Test case", ]
705
706         if table["include-tests"] == "MRR":
707             hdr_param = "Receive Rate"
708         else:
709             hdr_param = "Throughput"
710
711         header.extend(
712             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
713              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
714              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
715              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
716              "Delta [%]"])
717         header_str = ",".join(header) + "\n"
718     except (AttributeError, KeyError) as err:
719         logging.error("The model is invalid, missing parameter: {0}".
720                       format(err))
721         return
722
723     # Prepare data for the table:
724     tbl_dict = dict()
725     for job, builds in table["data"].items():
726         for build in builds:
727             for tst_name, tst_data in data[job][str(build)].iteritems():
728                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
729                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
730                     replace("-ndrdisc", "").replace("-pdr", "").\
731                     replace("-ndr", "").\
732                     replace("1t1c", "1c").replace("2t1c", "1c").\
733                     replace("2t2c", "2c").replace("4t2c", "2c").\
734                     replace("4t4c", "4c").replace("8t4c", "4c")
735                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
736                 if tbl_dict.get(tst_name_mod, None) is None:
737                     name = "-".join(tst_data["name"].split("-")[:-1])
738                     tbl_dict[tst_name_mod] = {"name": name,
739                                               "ref-data": list(),
740                                               "cmp-data": list()}
741                 try:
742                     if table["include-tests"] == "MRR":
743                         result = tst_data["result"]["receive-rate"].avg
744                     elif table["include-tests"] == "PDR":
745                         result = tst_data["throughput"]["PDR"]["LOWER"]
746                     elif table["include-tests"] == "NDR":
747                         result = tst_data["throughput"]["NDR"]["LOWER"]
748                     else:
749                         result = None
750
751                     if result:
752                         if table["reference"]["nic"] in tst_data["tags"]:
753                             tbl_dict[tst_name_mod]["ref-data"].append(result)
754                         elif table["compare"]["nic"] in tst_data["tags"]:
755                             tbl_dict[tst_name_mod]["cmp-data"].append(result)
756                 except (TypeError, KeyError) as err:
757                     logging.debug("No data for {0}".format(tst_name))
758                     logging.debug(repr(err))
759                     # No data in output.xml for this test
760
761     tbl_lst = list()
762     for tst_name in tbl_dict.keys():
763         item = [tbl_dict[tst_name]["name"], ]
764         data_t = tbl_dict[tst_name]["ref-data"]
765         if data_t:
766             item.append(round(mean(data_t) / 1000000, 2))
767             item.append(round(stdev(data_t) / 1000000, 2))
768         else:
769             item.extend([None, None])
770         data_t = tbl_dict[tst_name]["cmp-data"]
771         if data_t:
772             item.append(round(mean(data_t) / 1000000, 2))
773             item.append(round(stdev(data_t) / 1000000, 2))
774         else:
775             item.extend([None, None])
776         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
777             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
778         if len(item) == len(header):
779             tbl_lst.append(item)
780
781     # Sort the table according to the relative change
782     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
783
784     # Generate csv tables:
785     csv_file = "{0}.csv".format(table["output-file"])
786     with open(csv_file, "w") as file_handler:
787         file_handler.write(header_str)
788         for test in tbl_lst:
789             file_handler.write(",".join([str(item) for item in test]) + "\n")
790
791     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
792
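# Worked example (made-up numbers) for the "Delta [%]" column computed above:
# relative_change() is imported from utils and, as used here, is assumed to
# return the percentage change of the compare mean against the reference
# mean, i.e.
#
#     delta = (cmp_mean - ref_mean) / ref_mean * 100
#
# so a reference of 10.00 Mpps and a compare of 11.50 Mpps would be reported
# as 15 once the int() cast drops the fractional part.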
793
794 def table_soak_vs_ndr(table, input_data):
795     """Generate the table(s) with algorithm: table_soak_vs_ndr
796     specified in the specification file.
797
798     :param table: Table to generate.
799     :param input_data: Data to process.
800     :type table: pandas.Series
801     :type input_data: InputData
802     """
803
804     logging.info("  Generating the table {0} ...".
805                  format(table.get("title", "")))
806
807     # Transform the data
808     logging.info("    Creating the data set for the {0} '{1}'.".
809                  format(table.get("type", ""), table.get("title", "")))
810     data = input_data.filter_data(table, continue_on_error=True)
811
812     # Prepare the header of the table
813     try:
814         header = [
815             "Test case",
816             "{0} Throughput [Mpps]".format(table["reference"]["title"]),
817             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
818             "{0} Throughput [Mpps]".format(table["compare"]["title"]),
819             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
820             "Delta [%]", "Stdev of delta [%]"]
821         header_str = ",".join(header) + "\n"
822     except (AttributeError, KeyError) as err:
823         logging.error("The model is invalid, missing parameter: {0}".
824                       format(err))
825         return
826
827     # Create a list of available SOAK test results:
828     tbl_dict = dict()
829     for job, builds in table["compare"]["data"].items():
830         for build in builds:
831             for tst_name, tst_data in data[job][str(build)].iteritems():
832                 if tst_data["type"] == "SOAK":
833                     tst_name_mod = tst_name.replace("-soak", "")
834                     if tbl_dict.get(tst_name_mod, None) is None:
835                         groups = re.search(REGEX_NIC, tst_data["parent"])
836                         nic = groups.group(0) if groups else ""
837                         name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
838                                                               split("-")[:-1]))
839                         tbl_dict[tst_name_mod] = {
840                             "name": name,
841                             "ref-data": list(),
842                             "cmp-data": list()
843                         }
844                     try:
845                         tbl_dict[tst_name_mod]["cmp-data"].append(
846                             tst_data["throughput"]["LOWER"])
847                     except (KeyError, TypeError):
848                         pass
849     tests_lst = tbl_dict.keys()
850
851     # Add corresponding NDR test results:
852     for job, builds in table["reference"]["data"].items():
853         for build in builds:
854             for tst_name, tst_data in data[job][str(build)].iteritems():
855                 tst_name_mod = tst_name.replace("-ndrpdr", "").\
856                     replace("-mrr", "")
857                 if tst_name_mod in tests_lst:
858                     try:
859                         if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
860                             if table["include-tests"] == "MRR":
861                                 result = tst_data["result"]["receive-rate"].avg
862                             elif table["include-tests"] == "PDR":
863                                 result = tst_data["throughput"]["PDR"]["LOWER"]
864                             elif table["include-tests"] == "NDR":
865                                 result = tst_data["throughput"]["NDR"]["LOWER"]
866                             else:
867                                 result = None
868                             if result is not None:
869                                 tbl_dict[tst_name_mod]["ref-data"].append(
870                                     result)
871                     except (KeyError, TypeError):
872                         continue
873
874     tbl_lst = list()
875     for tst_name in tbl_dict.keys():
876         item = [tbl_dict[tst_name]["name"], ]
877         data_r = tbl_dict[tst_name]["ref-data"]
878         if data_r:
879             data_r_mean = mean(data_r)
880             item.append(round(data_r_mean / 1000000, 2))
881             data_r_stdev = stdev(data_r)
882             item.append(round(data_r_stdev / 1000000, 2))
883         else:
884             data_r_mean = None
885             data_r_stdev = None
886             item.extend([None, None])
887         data_c = tbl_dict[tst_name]["cmp-data"]
888         if data_c:
889             data_c_mean = mean(data_c)
890             item.append(round(data_c_mean / 1000000, 2))
891             data_c_stdev = stdev(data_c)
892             item.append(round(data_c_stdev / 1000000, 2))
893         else:
894             data_c_mean = None
895             data_c_stdev = None
896             item.extend([None, None])
897         if data_r_mean and data_c_mean:
898             delta, d_stdev = relative_change_stdev(
899                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
900             item.append(round(delta, 2))
901             item.append(round(d_stdev, 2))
902             tbl_lst.append(item)
903
904     # Sort the table according to the relative change
905     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
906
907     # Generate csv tables:
908     csv_file = "{0}.csv".format(table["output-file"])
909     with open(csv_file, "w") as file_handler:
910         file_handler.write(header_str)
911         for test in tbl_lst:
912             file_handler.write(",".join([str(item) for item in test]) + "\n")
913
914     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
915
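# Note on the pairing above: SOAK and NDR results are matched purely by the
# normalised test name -- "-soak" is stripped from the SOAK test name and
# "-ndrpdr" / "-mrr" from the reference name, so, with hypothetical names,
#
#     "64b-2t1c-ethip4-ip4base-soak"
#     "64b-2t1c-ethip4-ip4base-ndrpdr"
#
# both normalise to "64b-2t1c-ethip4-ip4base" and contribute to one row.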
916
917 def table_performance_trending_dashboard(table, input_data):
918     """Generate the table(s) with algorithm:
919     table_performance_trending_dashboard
920     specified in the specification file.
921
922     :param table: Table to generate.
923     :param input_data: Data to process.
924     :type table: pandas.Series
925     :type input_data: InputData
926     """
927
928     logging.info("  Generating the table {0} ...".
929                  format(table.get("title", "")))
930
931     # Transform the data
932     logging.info("    Creating the data set for the {0} '{1}'.".
933                  format(table.get("type", ""), table.get("title", "")))
934     data = input_data.filter_data(table, continue_on_error=True)
935
936     # Prepare the header of the tables
937     header = ["Test Case",
938               "Trend [Mpps]",
939               "Short-Term Change [%]",
940               "Long-Term Change [%]",
941               "Regressions [#]",
942               "Progressions [#]"
943               ]
944     header_str = ",".join(header) + "\n"
945
946     # Prepare data for the table:
947     tbl_dict = dict()
948     for job, builds in table["data"].items():
949         for build in builds:
950             for tst_name, tst_data in data[job][str(build)].iteritems():
951                 if tst_name.lower() in table.get("ignore-list", list()):
952                     continue
953                 if tbl_dict.get(tst_name, None) is None:
954                     groups = re.search(REGEX_NIC, tst_data["parent"])
955                     if not groups:
956                         continue
957                     nic = groups.group(0)
958                     tbl_dict[tst_name] = {
959                         "name": "{0}-{1}".format(nic, tst_data["name"]),
960                         "data": OrderedDict()}
961                 try:
962                     tbl_dict[tst_name]["data"][str(build)] = \
963                         tst_data["result"]["receive-rate"]
964                 except (TypeError, KeyError):
965                     pass  # No data in output.xml for this test
966
967     tbl_lst = list()
968     for tst_name in tbl_dict.keys():
969         data_t = tbl_dict[tst_name]["data"]
970         if len(data_t) < 2:
971             continue
972
973         classification_lst, avgs = classify_anomalies(data_t)
974
975         win_size = min(len(data_t), table["window"])
976         long_win_size = min(len(data_t), table["long-trend-window"])
977
978         try:
979             max_long_avg = max(
980                 [x for x in avgs[-long_win_size:-win_size]
981                  if not isnan(x)])
982         except ValueError:
983             max_long_avg = nan
984         last_avg = avgs[-1]
985         avg_week_ago = avgs[max(-win_size, -len(avgs))]
986
987         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
988             rel_change_last = nan
989         else:
990             rel_change_last = round(
991                 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
992
993         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
994             rel_change_long = nan
995         else:
996             rel_change_long = round(
997                 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
998
999         if classification_lst:
1000             if isnan(rel_change_last) and isnan(rel_change_long):
1001                 continue
1002             if (isnan(last_avg) or
1003                 isnan(rel_change_last) or
1004                 isnan(rel_change_long)):
1005                 continue
1006             tbl_lst.append(
1007                 [tbl_dict[tst_name]["name"],
1008                  round(last_avg / 1000000, 2),
1009                  rel_change_last,
1010                  rel_change_long,
1011                  classification_lst[-win_size:].count("regression"),
1012                  classification_lst[-win_size:].count("progression")])
1013
1014     tbl_lst.sort(key=lambda rel: rel[0])
1015
1016     tbl_sorted = list()
1017     for nrr in range(table["window"], -1, -1):
1018         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1019         for nrp in range(table["window"], -1, -1):
1020             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1021             tbl_out.sort(key=lambda rel: rel[2])
1022             tbl_sorted.extend(tbl_out)
1023
1024     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1025
1026     logging.info("    Writing file: '{0}'".format(file_name))
1027     with open(file_name, "w") as file_handler:
1028         file_handler.write(header_str)
1029         for test in tbl_sorted:
1030             file_handler.write(",".join([str(item) for item in test]) + '\n')
1031
1032     txt_file_name = "{0}.txt".format(table["output-file"])
1033     logging.info("    Writing file: '{0}'".format(txt_file_name))
1034     convert_csv_to_pretty_txt(file_name, txt_file_name)
1035
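# Worked example (made-up numbers) for the dashboard columns above: with a
# trend of 9.5 Mpps one short-term window ago, a latest trend of 10.0 Mpps
# and a long-term maximum of 10.4 Mpps,
#
#     rel_change_last = round((10.0 - 9.5) / 9.5 * 100, 2)     # ->  5.26
#     rel_change_long = round((10.0 - 10.4) / 10.4 * 100, 2)   # -> -3.85
#
# i.e. the short-term column compares the latest trend value to the trend one
# window ago, while the long-term column compares it to the best trend seen
# in the long window (excluding the short window).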
1036
1037 def _generate_url(base, testbed, test_name):
1038     """Generate URL to a trending plot from the name of the test case.
1039
1040     :param base: The base part of URL common to all test cases.
1041     :param testbed: The testbed used for testing.
1042     :param test_name: The name of the test case.
1043     :type base: str
1044     :type testbed: str
1045     :type test_name: str
1046     :returns: The URL to the plot with the trending data for the given test
1047         case.
1048     :rtype: str
1049     """
1050
1051     url = base
1052     file_name = ""
1053     anchor = ".html#"
1054     feature = ""
1055
1056     if "lbdpdk" in test_name or "lbvpp" in test_name:
1057         file_name = "link_bonding"
1058
1059     elif "114b" in test_name and "vhost" in test_name:
1060         file_name = "vts"
1061
1062     elif "testpmd" in test_name or "l3fwd" in test_name:
1063         file_name = "dpdk"
1064
1065     elif "memif" in test_name:
1066         file_name = "container_memif"
1067         feature = "-base"
1068
1069     elif "srv6" in test_name:
1070         file_name = "srv6"
1071
1072     elif "vhost" in test_name:
1073         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
1074             file_name = "vm_vhost_l2"
1075             if "114b" in test_name:
1076                 feature = ""
1077             elif "l2xcbase" in test_name and "x520" in test_name:
1078                 feature = "-base-l2xc"
1079             elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
1080                 feature = "-base-l2bd"
1081             else:
1082                 feature = "-base"
1083         elif "ip4base" in test_name:
1084             file_name = "vm_vhost_ip4"
1085             feature = "-base"
1086
1087     elif "ipsecbasetnlsw" in test_name:
1088         file_name = "ipsecsw"
1089         feature = "-base-scale"
1090
1091     elif "ipsec" in test_name:
1092         file_name = "ipsec"
1093         feature = "-base-scale"
1094         if "hw-" in test_name:
1095             file_name = "ipsechw"
1096         elif "sw-" in test_name:
1097             file_name = "ipsecsw"
1098         if "-int-" in test_name:
1099             feature = "-base-scale-int"
1100         elif "tnl" in test_name:
1101             feature = "-base-scale-tnl"
1102
1103     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
1104         file_name = "ip4_tunnels"
1105         feature = "-base"
1106
1107     elif "ip4base" in test_name or "ip4scale" in test_name:
1108         file_name = "ip4"
1109         if "xl710" in test_name:
1110             feature = "-base-scale-features"
1111         elif "iacl" in test_name:
1112             feature = "-features-iacl"
1113         elif "oacl" in test_name:
1114             feature = "-features-oacl"
1115         elif "snat" in test_name or "cop" in test_name:
1116             feature = "-features"
1117         else:
1118             feature = "-base-scale"
1119
1120     elif "ip6base" in test_name or "ip6scale" in test_name:
1121         file_name = "ip6"
1122         feature = "-base-scale"
1123
1124     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
1125             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
1126             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
1127         file_name = "l2"
1128         if "macip" in test_name:
1129             feature = "-features-macip"
1130         elif "iacl" in test_name:
1131             feature = "-features-iacl"
1132         elif "oacl" in test_name:
1133             feature = "-features-oacl"
1134         else:
1135             feature = "-base-scale"
1136
1137     if "x520" in test_name:
1138         nic = "x520-"
1139     elif "x710" in test_name:
1140         nic = "x710-"
1141     elif "xl710" in test_name:
1142         nic = "xl710-"
1143     elif "xxv710" in test_name:
1144         nic = "xxv710-"
1145     elif "vic1227" in test_name:
1146         nic = "vic1227-"
1147     elif "vic1385" in test_name:
1148         nic = "vic1385-"
1149     elif "x553" in test_name:
1150         nic = "x553-"
1151     else:
1152         nic = ""
1153     anchor += nic
1154
1155     if "64b" in test_name:
1156         framesize = "64b"
1157     elif "78b" in test_name:
1158         framesize = "78b"
1159     elif "imix" in test_name:
1160         framesize = "imix"
1161     elif "9000b" in test_name:
1162         framesize = "9000b"
1163     elif "1518b" in test_name:
1164         framesize = "1518b"
1165     elif "114b" in test_name:
1166         framesize = "114b"
1167     else:
1168         framesize = ""
1169     anchor += framesize + '-'
1170
1171     if "1t1c" in test_name:
1172         anchor += "1t1c"
1173     elif "2t2c" in test_name:
1174         anchor += "2t2c"
1175     elif "4t4c" in test_name:
1176         anchor += "4t4c"
1177     elif "2t1c" in test_name:
1178         anchor += "2t1c"
1179     elif "4t2c" in test_name:
1180         anchor += "4t2c"
1181     elif "8t4c" in test_name:
1182         anchor += "8t4c"
1183
1184     return url + file_name + '-' + testbed + '-' + nic + framesize + \
1185         feature.replace("-int", "").replace("-tnl", "") + anchor + feature
1186
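# A minimal usage sketch of ``_generate_url`` (the test name below is
# hypothetical; the exact anchor depends on which NIC, frame-size and core
# sub-strings the name contains):
#
#     link = _generate_url("../trending/", "3n-hsw",
#                          "64b-1t1c-ethip4-ip4base-ndrpdr-mrr")
#
# The result is composed as base + file_name + '-' + testbed + '-' + nic +
# framesize + feature (with "-int"/"-tnl" stripped) + anchor + feature and
# points to the corresponding graph on the trending pages.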
1187
1188 def table_performance_trending_dashboard_html(table, input_data):
1189     """Generate the table(s) with algorithm:
1190     table_performance_trending_dashboard_html specified in the specification
1191     file.
1192
1193     :param table: Table to generate.
1194     :param input_data: Data to process.
1195     :type table: dict
1196     :type input_data: InputData
1197     """
1198
1199     testbed = table.get("testbed", None)
1200     if testbed is None:
1201         logging.error("The testbed is not defined for the table '{0}'.".
1202                       format(table.get("title", "")))
1203         return
1204
1205     logging.info("  Generating the table {0} ...".
1206                  format(table.get("title", "")))
1207
1208     try:
1209         with open(table["input-file"], 'rb') as csv_file:
1210             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1211             csv_lst = list(csv_content)
1212     except KeyError:
1213         logging.warning("The input file is not defined.")
1214         return
1215     except csv.Error as err:
1216         logging.warning("Cannot process the file '{0}'.\n{1}".
1217                         format(table["input-file"], err))
1218         return
1219
1220     # Table:
1221     dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
1222
1223     # Table header:
1224     tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
1225     for idx, item in enumerate(csv_lst[0]):
1226         alignment = "left" if idx == 0 else "center"
1227         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
1228         th.text = item
1229
1230     # Rows:
1231     colors = {"regression": ("#ffcccc", "#ff9999"),
1232               "progression": ("#c6ecc6", "#9fdf9f"),
1233               "normal": ("#e9f1fb", "#d4e4f7")}
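    # Each state maps to two shades; even and odd rows alternate between them
    # (the "r_idx % 2" selection below) to keep the table readable.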
1234     for r_idx, row in enumerate(csv_lst[1:]):
1235         if int(row[4]):
1236             color = "regression"
1237         elif int(row[5]):
1238             color = "progression"
1239         else:
1240             color = "normal"
1241         background = colors[color][r_idx % 2]
1242         tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
1243
1244         # Columns:
1245         for c_idx, item in enumerate(row):
1246             alignment = "left" if c_idx == 0 else "center"
1247             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
1248             # Name:
1249             if c_idx == 0:
1250                 url = _generate_url("../trending/", testbed, item)
1251                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
1252                 ref.text = item
1253             else:
1254                 td.text = item
1255     try:
1256         with open(table["output-file"], 'w') as html_file:
1257             logging.info("    Writing file: '{0}'".format(table["output-file"]))
1258             html_file.write(".. raw:: html\n\n\t")
1259             html_file.write(ET.tostring(dashboard))
1260             html_file.write("\n\t<p><br><br></p>\n")
1261     except KeyError:
1262         logging.warning("The output file is not defined.")
1263         return
1264
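# Illustrative sketch (not part of the generators above): the row-colouring
# rule from table_performance_trending_dashboard_html reduced to a
# stand-alone helper.  The name and signature are hypothetical; row[4] and
# row[5] are assumed to hold the regression and progression counters of the
# input CSV, as implied by the indices used in that function.
def _pick_row_colour(row, r_idx, colors):
    """Return the background colour for one dashboard row (sketch only)."""
    if int(row[4]):          # number of regressions
        key = "regression"
    elif int(row[5]):        # number of progressions
        key = "progression"
    else:
        key = "normal"
    # Alternate between the two shades of the chosen colour.
    return colors[key][r_idx % 2]
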
1265
1266 def table_last_failed_tests(table, input_data):
1267     """Generate the table(s) with algorithm: table_last_failed_tests
1268     specified in the specification file.
1269
1270     :param table: Table to generate.
1271     :param input_data: Data to process.
1272     :type table: pandas.Series
1273     :type input_data: InputData
1274     """
1275
1276     logging.info("  Generating the table {0} ...".
1277                  format(table.get("title", "")))
1278
1279     # Transform the data
1280     logging.info("    Creating the data set for the {0} '{1}'.".
1281                  format(table.get("type", ""), table.get("title", "")))
1282     data = input_data.filter_data(table, continue_on_error=True)
1283
1284     if data is None or data.empty:
1285         logging.warning("    No data for the {0} '{1}'.".
1286                         format(table.get("type", ""), table.get("title", "")))
1287         return
1288
1289     tbl_list = list()
1290     for job, builds in table["data"].items():
1291         for build in builds:
1292             build = str(build)
1293             try:
1294                 version = input_data.metadata(job, build).get("version", "")
1295             except KeyError:
1296                 logging.error("Data for {job}: {build} is not present.".
1297                               format(job=job, build=build))
1298                 return
1299             tbl_list.append(build)
1300             tbl_list.append(version)
1301             for tst_name, tst_data in data[job][build].iteritems():
1302                 if tst_data["status"] != "FAIL":
1303                     continue
1304                 groups = re.search(REGEX_NIC, tst_data["parent"])
1305                 if not groups:
1306                     continue
1307                 nic = groups.group(0)
1308                 tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))
1309
1310     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1311     logging.info("    Writing file: '{0}'".format(file_name))
1312     with open(file_name, "w") as file_handler:
1313         for test in tbl_list:
1314             file_handler.write(test + '\n')
1315
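# Illustrative example of how the NIC prefix is derived above (the parent
# suite name is hypothetical but matches REGEX_NIC, r'\d*ge\dp\d\D*\d*'):
#
#     >>> re.search(REGEX_NIC, "10ge2p1x520-ethip4-ip4base-mrr").group(0)
#     '10ge2p1x520'
#
# The extracted NIC is prepended to the test name to form the row identifier
# written to the output file.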
1316
1317 def table_failed_tests(table, input_data):
1318     """Generate the table(s) with algorithm: table_failed_tests
1319     specified in the specification file.
1320
1321     :param table: Table to generate.
1322     :param input_data: Data to process.
1323     :type table: pandas.Series
1324     :type input_data: InputData
1325     """
1326
1327     logging.info("  Generating the table {0} ...".
1328                  format(table.get("title", "")))
1329
1330     # Transform the data
1331     logging.info("    Creating the data set for the {0} '{1}'.".
1332                  format(table.get("type", ""), table.get("title", "")))
1333     data = input_data.filter_data(table, continue_on_error=True)
1334
1335     # Prepare the header of the tables
1336     header = ["Test Case",
1337               "Failures [#]",
1338               "Last Failure [Time]",
1339               "Last Failure [VPP-Build-Id]",
1340               "Last Failure [CSIT-Job-Build-Id]"]
1341
1342     # Generate the data for the table according to the model in the table
1343     # specification
1344
1345     now = dt.utcnow()
1346     timeperiod = timedelta(int(table.get("window", 7)))  # "window" is given in days
1347
1348     tbl_dict = dict()
1349     for job, builds in table["data"].items():
1350         for build in builds:
1351             build = str(build)
1352             for tst_name, tst_data in data[job][build].iteritems():
1353                 if tst_name.lower() in table.get("ignore-list", list()):
1354                     continue
1355                 if tbl_dict.get(tst_name, None) is None:
1356                     groups = re.search(REGEX_NIC, tst_data["parent"])
1357                     if not groups:
1358                         continue
1359                     nic = groups.group(0)
1360                     tbl_dict[tst_name] = {
1361                         "name": "{0}-{1}".format(nic, tst_data["name"]),
1362                         "data": OrderedDict()}
1363                 try:
1364                     generated = input_data.metadata(job, build).\
1365                         get("generated", "")
1366                     if not generated:
1367                         continue
1368                     then = dt.strptime(generated, "%Y%m%d %H:%M")
1369                     if (now - then) <= timeperiod:
1370                         tbl_dict[tst_name]["data"][build] = (
1371                             tst_data["status"],
1372                             generated,
1373                             input_data.metadata(job, build).get("version", ""),
1374                             build)
1375                 except (TypeError, KeyError) as err:
1376                     logging.warning("tst_name: {} - err: {}".
1377                                     format(tst_name, repr(err)))
1378
1379     max_fails = 0
1380     tbl_lst = list()
1381     for tst_data in tbl_dict.values():
1382         fails_nr = 0
1383         for val in tst_data["data"].values():
1384             if val[0] == "FAIL":
1385                 fails_nr += 1
1386                 fails_last_date = val[1]
1387                 fails_last_vpp = val[2]
1388                 fails_last_csit = val[3]
1389         if fails_nr:
1390             max_fails = fails_nr if fails_nr > max_fails else max_fails
1391             tbl_lst.append([tst_data["name"],
1392                             fails_nr,
1393                             fails_last_date,
1394                             fails_last_vpp,
1395                             "mrr-daily-build-{0}".format(fails_last_csit)])
1396
1397     tbl_lst.sort(key=lambda row: row[2], reverse=True)  # newest failure first
1398     tbl_sorted = list()
1399     for nrf in range(max_fails, -1, -1):
1400         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1401         tbl_sorted.extend(tbl_fails)
1402     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1403
1404     logging.info("    Writing file: '{0}'".format(file_name))
1405     with open(file_name, "w") as file_handler:
1406         file_handler.write(",".join(header) + "\n")
1407         for test in tbl_sorted:
1408             file_handler.write(",".join([str(item) for item in test]) + '\n')
1409
1410     txt_file_name = "{0}.txt".format(table["output-file"])
1411     logging.info("    Writing file: '{0}'".format(txt_file_name))
1412     convert_csv_to_pretty_txt(file_name, txt_file_name)
1413
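# Note on the ordering in table_failed_tests: tbl_lst is first sorted by the
# last-failure time stamp (newest first) and then regrouped by failure count,
# so the final order is "most failures first, ties broken by the most recent
# failure".  An equivalent single-pass variant would be (illustrative only,
# not used above):
#
#     tbl_sorted = sorted(tbl_lst, key=lambda row: (row[1], row[2]),
#                         reverse=True)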
1414
1415 def table_failed_tests_html(table, input_data):
1416     """Generate the table(s) with algorithm: table_failed_tests_html
1417     specified in the specification file.
1418
1419     :param table: Table to generate.
1420     :param input_data: Data to process.
1421     :type table: pandas.Series
1422     :type input_data: InputData
1423     """
1424
1425     testbed = table.get("testbed", None)
1426     if testbed is None:
1427         logging.error("The testbed is not defined for the table '{0}'.".
1428                       format(table.get("title", "")))
1429         return
1430
1431     logging.info("  Generating the table {0} ...".
1432                  format(table.get("title", "")))
1433
1434     try:
1435         with open(table["input-file"], 'rb') as csv_file:
1436             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1437             csv_lst = list(csv_content)
1438     except KeyError:
1439         logging.warning("The input file is not defined.")
1440         return
1441     except csv.Error as err:
1442         logging.warning("Cannot process the file '{0}'.\n{1}".
1443                         format(table["input-file"], err))
1444         return
1445
1446     # Table:
1447     failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
1448
1449     # Table header:
1450     tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
1451     for idx, item in enumerate(csv_lst[0]):
1452         alignment = "left" if idx == 0 else "center"
1453         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
1454         th.text = item
1455
1456     # Rows:
1457     colors = ("#e9f1fb", "#d4e4f7")
1458     for r_idx, row in enumerate(csv_lst[1:]):
1459         background = colors[r_idx % 2]
1460         tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
1461
1462         # Columns:
1463         for c_idx, item in enumerate(row):
1464             alignment = "left" if c_idx == 0 else "center"
1465             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
1466             # Name:
1467             if c_idx == 0:
1468                 url = _generate_url("../trending/", testbed, item)
1469                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
1470                 ref.text = item
1471             else:
1472                 td.text = item
1473     try:
1474         with open(table["output-file"], 'w') as html_file:
1475             logging.info("    Writing file: '{0}'".format(table["output-file"]))
1476             html_file.write(".. raw:: html\n\n\t")
1477             html_file.write(ET.tostring(failed_tests))
1478             html_file.write("\n\t<p><br><br></p>\n")
1479     except KeyError:
1480         logging.warning("The output file is not defined.")
1481         return
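# The file written by table_failed_tests_html embeds the generated table into
# reStructuredText via the ``raw`` directive.  The actual output keeps the
# whole <table> element on a single tab-indented line; it is expanded here
# for readability (illustrative, values shortened):
#
#     .. raw:: html
#
#         <table width="100%" border="0">
#             <tr bgcolor="#7eade7"><th align="left">Test Case</th>...</tr>
#             <tr bgcolor="#e9f1fb"><td align="left"><a href="...">name</a>...</tr>
#         </table>
#         <p><br><br></p>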