CSIT-1590: Performance comparison analysis
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
28
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30     convert_csv_to_pretty_txt, relative_change_stdev
31
32
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
34
35
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table item names its generator function in table["algorithm"];
    the function is looked up in this module's namespace and called with
    (table, data).

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        # Look the algorithm up in this module's namespace instead of
        # eval()-ing the spec string: eval would execute any expression
        # placed in the specification file.
        table_gen = globals().get(table["algorithm"], None)
        if table_gen is None:
            logging.error("Probably algorithm '{alg}' is not defined.".
                          format(alg=table["algorithm"]))
            continue
        try:
            table_gen(table, data)
        except NameError as err:
            # Keep parity with the original eval-based handling: a
            # NameError raised inside the generator is logged, not fatal.
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
    logging.info("Done.")
53
54
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    One CSV file is written per suite, named
    "<output-file>_<suite name><output-file-ext>".

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables; quotes are doubled for CSV escaping.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification. Only the first job/build pair from the specification
    # is used as the data source.
    # list(...) keeps this working on both Python 2 and 3 (dict views are
    # not indexable on Python 3).
    job = list(table["data"].keys())[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for _, suite in suites.items():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # column["data"] is e.g. "data show-run"; the second
                        # token selects the field of the test record.
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("conf-history",
                                                            "show-run"):
                            # Drop the first line break and present the rest
                            # as pre-formatted text (the trailing 5 chars are
                            # a dangling " |br| " separator).
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
121
122
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Like table_details, but the filtered data from all builds is merged
    first. One CSV file is written per suite, named
    "<output-file>_<suite name><output-file-ext>".

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; quotes are doubled for CSV escaping.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.items():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # column["data"] is e.g. "data show-run"; the second
                        # token selects the field of the test record.
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        # Padding keeps column width aligned with real data.
                        col_data = col_data.replace("No Data",
                                                    "Not Captured     ")
                        if column["data"].split(" ")[1] in ("conf-history",
                                                            "show-run"):
                            # Drop the first line break and present the rest
                            # as pre-formatted text (the trailing 5 chars are
                            # a dangling " |br| " separator).
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append('"Not captured"')
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
188
189
def _tpc_map_cores(name):
    """Map thread/core tags (e.g. "2t1c") to core-only tags ("1c").

    :param name: Test or suite name to modify.
    :type name: str
    :returns: Name with core tags normalized.
    :rtype: str
    """
    for old, new in (("1t1c", "1c"), ("2t1c", "1c"), ("2t2c", "2c"),
                     ("4t2c", "2c"), ("4t4c", "4c"), ("8t4c", "4c")):
        name = name.replace(old, new)
    return name


def _tpc_modify_test_name(test_name):
    """Drop the test-type suffix (-ndrpdr, -mrr variants, ...) from a test
    name and normalize its core tags, producing the key used to pair
    reference and compare results.

    :param test_name: Original test name.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    for suffix in ("-ndrpdrdisc", "-ndrpdr", "-pdrdisc", "-ndrdisc",
                   "-pdr", "-ndr"):
        test_name = test_name.replace(suffix, "")
    return _tpc_map_cores(test_name)


def _tpc_test_value(tst_data, include_tests):
    """Extract the measured value of one test.

    :param tst_data: Test data.
    :param include_tests: Which tests to include: "MRR", "PDR" or "NDR".
    :type tst_data: dict
    :type include_tests: str
    :returns: The measured value, or None when the test type does not match
        include_tests or the data is missing/malformed.
    :rtype: float or None
    """
    # TODO: Re-work when NDRPDRDISC tests are not used
    try:
        if include_tests == "MRR":
            return tst_data["result"]["receive-rate"].avg
        if include_tests == "PDR":
            if tst_data["type"] == "PDR":
                return tst_data["throughput"]["value"]
            if tst_data["type"] == "NDRPDR":
                return tst_data["throughput"]["PDR"]["LOWER"]
        elif include_tests == "NDR":
            if tst_data["type"] == "NDR":
                return tst_data["throughput"]["value"]
            if tst_data["type"] == "NDRPDR":
                return tst_data["throughput"]["NDR"]["LOWER"]
    except (KeyError, TypeError):
        # No (or malformed) data in output.xml for this test.
        pass
    return None


def _tpc_fill(tbl_dict, data, data_spec, table, target):
    """Collect results of the builds listed in data_spec into
    tbl_dict[<modified test name>][target].

    :param tbl_dict: Table dictionary to fill (created entries get "name",
        "ref-data" and "cmp-data" keys).
    :param data: Filtered input data.
    :param data_spec: {job: [builds]} mapping from the table specification.
    :param table: Table specification (for "title" and "include-tests").
    :param target: "ref-data" or "cmp-data".
    :type tbl_dict: dict
    :type data: pandas.Series
    :type data_spec: dict
    :type table: pandas.Series
    :type target: str
    """
    for job, builds in data_spec.items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Display name: NIC prefix from the parent suite plus the
                    # test name without its last (frame-size) segment.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(
                        tst_data["name"].split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = _tpc_map_cores(name)
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                value = _tpc_test_value(tst_data, table["include-tests"])
                if value is not None:
                    tbl_dict[tst_name_mod][target].append(value)


def _tpc_fill_history(tbl_dict, data, history, table):
    """Collect historical results into tbl_dict[<name>]["history"].

    Only tests already present in tbl_dict (i.e. seen in the reference or
    compare data) are considered.

    :param tbl_dict: Table dictionary to fill.
    :param data: Filtered input data.
    :param history: "history" items from the table specification.
    :param table: Table specification.
    :type tbl_dict: dict
    :type data: pandas.Series
    :type history: list
    :type table: pandas.Series
    """
    for item in history:
        for job, builds in item["data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if "across topologies" in table["title"].lower():
                        tst_name_mod = tst_name_mod.replace("2n1l-", "")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    hist = tbl_dict[tst_name_mod].setdefault(
                        "history", OrderedDict())
                    hist_lst = hist.setdefault(item["title"], list())
                    value = _tpc_test_value(tst_data, table["include-tests"])
                    if value is not None:
                        hist_lst.append(value)


def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Writes "<output-file>.csv" and a pretty-printed "<output-file>.txt"
    comparing reference and compare builds (optionally with history
    columns), sorted by relative change.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    _tpc_fill(tbl_dict, data, table["reference"]["data"], table, "ref-data")
    _tpc_fill(tbl_dict, data, table["compare"]["data"], table, "cmp-data")
    if history:
        _tpc_fill_history(tbl_dict, data, history, table)

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        # Convert pps to Mpps.
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                # NOTE(review): only one [None, None] pair is added even when
                # several history columns exist; such rows are later dropped
                # by the len(item) == len(header) check. Kept as-is.
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        if "dot1q" in tbl_dict[tst_name]["name"]:
            item.append("Changed methodology")
        elif item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        else:
            item.append("n/a")
        # Keep only complete rows that have a compare-side mean value.
        if (len(item) == len(header)) and (item[-5] is not None):
            tbl_lst.append(item)

    # Sort the table according to the relative change. Rows whose delta is a
    # string marker ("n/a", "Changed methodology") sort first, matching the
    # original Python 2 mixed-type ordering (str > int) with reverse=True.
    tbl_lst.sort(key=lambda rel: rel[-1] if isinstance(
        rel[-1], (int, float)) else float("inf"), reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
432
433
434 def table_performance_comparison_nic(table, input_data):
435     """Generate the table(s) with algorithm: table_performance_comparison
436     specified in the specification file.
437
438     :param table: Table to generate.
439     :param input_data: Data to process.
440     :type table: pandas.Series
441     :type input_data: InputData
442     """
443
444     logging.info("  Generating the table {0} ...".
445                  format(table.get("title", "")))
446
447     # Transform the data
448     logging.info("    Creating the data set for the {0} '{1}'.".
449                  format(table.get("type", ""), table.get("title", "")))
450     data = input_data.filter_data(table, continue_on_error=True)
451
452     # Prepare the header of the tables
453     try:
454         header = ["Test case", ]
455
456         if table["include-tests"] == "MRR":
457             hdr_param = "Receive Rate"
458         else:
459             hdr_param = "Throughput"
460
461         history = table.get("history", None)
462         if history:
463             for item in history:
464                 header.extend(
465                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
466                      "{0} Stdev [Mpps]".format(item["title"])])
467         header.extend(
468             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
469              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
470              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
471              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
472              "Delta [%]"])
473         header_str = ",".join(header) + "\n"
474     except (AttributeError, KeyError) as err:
475         logging.error("The model is invalid, missing parameter: {0}".
476                       format(err))
477         return
478
479     # Prepare data to the table:
480     tbl_dict = dict()
481     for job, builds in table["reference"]["data"].items():
482         for build in builds:
483             for tst_name, tst_data in data[job][str(build)].iteritems():
484                 if table["reference"]["nic"] not in tst_data["tags"]:
485                     continue
486                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
487                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
488                     replace("-ndrdisc", "").replace("-pdr", "").\
489                     replace("-ndr", "").\
490                     replace("1t1c", "1c").replace("2t1c", "1c").\
491                     replace("2t2c", "2c").replace("4t2c", "2c").\
492                     replace("4t4c", "4c").replace("8t4c", "4c")
493                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
494                 if "across topologies" in table["title"].lower():
495                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
496                 if tbl_dict.get(tst_name_mod, None) is None:
497                     name = "{0}".format("-".join(tst_data["name"].
498                                                  split("-")[:-1]))
499                     if "across testbeds" in table["title"].lower() or \
500                             "across topologies" in table["title"].lower():
501                         name = name.\
502                             replace("1t1c", "1c").replace("2t1c", "1c").\
503                             replace("2t2c", "2c").replace("4t2c", "2c").\
504                             replace("4t4c", "4c").replace("8t4c", "4c")
505                     tbl_dict[tst_name_mod] = {"name": name,
506                                               "ref-data": list(),
507                                               "cmp-data": list()}
508                 try:
509                     # TODO: Re-work when NDRPDRDISC tests are not used
510                     if table["include-tests"] == "MRR":
511                         tbl_dict[tst_name_mod]["ref-data"]. \
512                             append(tst_data["result"]["receive-rate"].avg)
513                     elif table["include-tests"] == "PDR":
514                         if tst_data["type"] == "PDR":
515                             tbl_dict[tst_name_mod]["ref-data"]. \
516                                 append(tst_data["throughput"]["value"])
517                         elif tst_data["type"] == "NDRPDR":
518                             tbl_dict[tst_name_mod]["ref-data"].append(
519                                 tst_data["throughput"]["PDR"]["LOWER"])
520                     elif table["include-tests"] == "NDR":
521                         if tst_data["type"] == "NDR":
522                             tbl_dict[tst_name_mod]["ref-data"]. \
523                                 append(tst_data["throughput"]["value"])
524                         elif tst_data["type"] == "NDRPDR":
525                             tbl_dict[tst_name_mod]["ref-data"].append(
526                                 tst_data["throughput"]["NDR"]["LOWER"])
527                     else:
528                         continue
529                 except TypeError:
530                     pass  # No data in output.xml for this test
531
532     for job, builds in table["compare"]["data"].items():
533         for build in builds:
534             for tst_name, tst_data in data[job][str(build)].iteritems():
535                 if table["compare"]["nic"] not in tst_data["tags"]:
536                     continue
537                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
538                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
539                     replace("-ndrdisc", "").replace("-pdr", ""). \
540                     replace("-ndr", "").\
541                     replace("1t1c", "1c").replace("2t1c", "1c").\
542                     replace("2t2c", "2c").replace("4t2c", "2c").\
543                     replace("4t4c", "4c").replace("8t4c", "4c")
544                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
545                 if "across topologies" in table["title"].lower():
546                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
547                 if tbl_dict.get(tst_name_mod, None) is None:
548                     name = "{0}".format("-".join(tst_data["name"].
549                                                  split("-")[:-1]))
550                     if "across testbeds" in table["title"].lower() or \
551                             "across topologies" in table["title"].lower():
552                         name = name.\
553                             replace("1t1c", "1c").replace("2t1c", "1c").\
554                             replace("2t2c", "2c").replace("4t2c", "2c").\
555                             replace("4t4c", "4c").replace("8t4c", "4c")
556                     tbl_dict[tst_name_mod] = {"name": name,
557                                               "ref-data": list(),
558                                               "cmp-data": list()}
559                 try:
560                     # TODO: Re-work when NDRPDRDISC tests are not used
561                     if table["include-tests"] == "MRR":
562                         tbl_dict[tst_name_mod]["cmp-data"]. \
563                             append(tst_data["result"]["receive-rate"].avg)
564                     elif table["include-tests"] == "PDR":
565                         if tst_data["type"] == "PDR":
566                             tbl_dict[tst_name_mod]["cmp-data"]. \
567                                 append(tst_data["throughput"]["value"])
568                         elif tst_data["type"] == "NDRPDR":
569                             tbl_dict[tst_name_mod]["cmp-data"].append(
570                                 tst_data["throughput"]["PDR"]["LOWER"])
571                     elif table["include-tests"] == "NDR":
572                         if tst_data["type"] == "NDR":
573                             tbl_dict[tst_name_mod]["cmp-data"]. \
574                                 append(tst_data["throughput"]["value"])
575                         elif tst_data["type"] == "NDRPDR":
576                             tbl_dict[tst_name_mod]["cmp-data"].append(
577                                 tst_data["throughput"]["NDR"]["LOWER"])
578                     else:
579                         continue
580                 except (KeyError, TypeError):
581                     pass
582
583     if history:
584         for item in history:
585             for job, builds in item["data"].items():
586                 for build in builds:
587                     for tst_name, tst_data in data[job][str(build)].iteritems():
588                         if item["nic"] not in tst_data["tags"]:
589                             continue
590                         tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
591                             replace("-ndrpdr", "").replace("-pdrdisc", ""). \
592                             replace("-ndrdisc", "").replace("-pdr", ""). \
593                             replace("-ndr", "").\
594                             replace("1t1c", "1c").replace("2t1c", "1c").\
595                             replace("2t2c", "2c").replace("4t2c", "2c").\
596                             replace("4t4c", "4c").replace("8t4c", "4c")
597                         tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
598                         if "across topologies" in table["title"].lower():
599                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
600                         if tbl_dict.get(tst_name_mod, None) is None:
601                             continue
602                         if tbl_dict[tst_name_mod].get("history", None) is None:
603                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
604                         if tbl_dict[tst_name_mod]["history"].get(item["title"],
605                                                              None) is None:
606                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
607                                 list()
608                         try:
609                             # TODO: Re-work when NDRPDRDISC tests are not used
610                             if table["include-tests"] == "MRR":
611                                 tbl_dict[tst_name_mod]["history"][item["title"
612                                 ]].append(tst_data["result"]["receive-rate"].
613                                           avg)
614                             elif table["include-tests"] == "PDR":
615                                 if tst_data["type"] == "PDR":
616                                     tbl_dict[tst_name_mod]["history"][
617                                         item["title"]].\
618                                         append(tst_data["throughput"]["value"])
619                                 elif tst_data["type"] == "NDRPDR":
620                                     tbl_dict[tst_name_mod]["history"][item[
621                                         "title"]].append(tst_data["throughput"][
622                                         "PDR"]["LOWER"])
623                             elif table["include-tests"] == "NDR":
624                                 if tst_data["type"] == "NDR":
625                                     tbl_dict[tst_name_mod]["history"][
626                                         item["title"]].\
627                                         append(tst_data["throughput"]["value"])
628                                 elif tst_data["type"] == "NDRPDR":
629                                     tbl_dict[tst_name_mod]["history"][item[
630                                         "title"]].append(tst_data["throughput"][
631                                         "NDR"]["LOWER"])
632                             else:
633                                 continue
634                         except (TypeError, KeyError):
635                             pass
636
637     tbl_lst = list()
638     for tst_name in tbl_dict.keys():
639         item = [tbl_dict[tst_name]["name"], ]
640         if history:
641             if tbl_dict[tst_name].get("history", None) is not None:
642                 for hist_data in tbl_dict[tst_name]["history"].values():
643                     if hist_data:
644                         item.append(round(mean(hist_data) / 1000000, 2))
645                         item.append(round(stdev(hist_data) / 1000000, 2))
646                     else:
647                         item.extend([None, None])
648             else:
649                 item.extend([None, None])
650         data_t = tbl_dict[tst_name]["ref-data"]
651         if data_t:
652             item.append(round(mean(data_t) / 1000000, 2))
653             item.append(round(stdev(data_t) / 1000000, 2))
654         else:
655             item.extend([None, None])
656         data_t = tbl_dict[tst_name]["cmp-data"]
657         if data_t:
658             item.append(round(mean(data_t) / 1000000, 2))
659             item.append(round(stdev(data_t) / 1000000, 2))
660         else:
661             item.extend([None, None])
662         if "dot1q" in tbl_dict[tst_name]["name"]:
663             item.append("Changed methodology")
664         elif item[-4] is not None and item[-2] is not None and item[-4] != 0:
665             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
666         else:
667             item.append("n/a")
668         if (len(item) == len(header)) and (item[-5] is not None):
669             tbl_lst.append(item)
670
671     # Sort the table according to the relative change
672     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
673
674     # Generate csv tables:
675     csv_file = "{0}.csv".format(table["output-file"])
676     with open(csv_file, "w") as file_handler:
677         file_handler.write(header_str)
678         for test in tbl_lst:
679             file_handler.write(",".join([str(item) for item in test]) + "\n")
680
681     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
682
683
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Compares results of the same tests run on two different NICs
    (table["reference"]["nic"] vs table["compare"]["nic"]) and adds the
    relative change in percent.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Normalize the test name: strip the test-type suffix and
                # collapse the thread/core combination (e.g. 2t1c -> 1c) so
                # the runs on both NICs map to the same dictionary key.
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                # Remove the NIC part of the name as well.
                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = "-".join(tst_data["name"].split("-")[:-1])
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # Pick the measured value according to the test type
                    # requested by the specification.
                    if table["include-tests"] == "MRR":
                        result = tst_data["result"]["receive-rate"].avg
                    elif table["include-tests"] == "PDR":
                        result = tst_data["throughput"]["PDR"]["LOWER"]
                    elif table["include-tests"] == "NDR":
                        result = tst_data["throughput"]["NDR"]["LOWER"]
                    else:
                        result = None

                    if result:
                        # Sort the sample into the reference or compare
                        # bucket based on the NIC tag of the test run.
                        if table["reference"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["ref-data"].append(result)
                        elif table["compare"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug("No data for {0}".format(tst_name))
                    logging.debug(repr(err))
                    # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        # Mean and standard deviation for the reference NIC, in Mpps.
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # Mean and standard deviation for the compared NIC, in Mpps.
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # item[-4] is the reference mean, item[-2] the compare mean.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Rows with a missing mean (and thus no delta appended above) are
        # shorter than the header and are silently dropped here.
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
792
793
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Pairs each SOAK test (compare side) with its corresponding NDR/PDR/MRR
    test (reference side) and computes the relative change with its
    standard deviation.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            "Test case",
            "{0} Throughput [Mpps]".format(table["reference"]["title"]),
            "{0} Stdev [Mpps]".format(table["reference"]["title"]),
            "{0} Throughput [Mpps]".format(table["compare"]["title"]),
            "{0} Stdev [Mpps]".format(table["compare"]["title"]),
            "Delta [%]", "Stdev of delta [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_data["type"] == "SOAK":
                    # Key by the test name without the "-soak" suffix so
                    # the reference pass below can match on it.
                    tst_name_mod = tst_name.replace("-soak", "")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data["parent"])
                        nic = groups.group(0) if groups else ""
                        name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                              split("-")[:-1]))
                        tbl_dict[tst_name_mod] = {
                            "name": name,
                            "ref-data": list(),
                            "cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod]["cmp-data"].append(
                            tst_data["throughput"]["LOWER"])
                    except (KeyError, TypeError):
                        # Test present but without a throughput result.
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdr", "").\
                    replace("-mrr", "")
                # Only tests which also have a SOAK result are kept.
                if tst_name_mod in tests_lst:
                    try:
                        if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
                            # Pick the value according to the test type
                            # requested by the specification.
                            if table["include-tests"] == "MRR":
                                result = tst_data["result"]["receive-rate"].avg
                            elif table["include-tests"] == "PDR":
                                result = tst_data["throughput"]["PDR"]["LOWER"]
                            elif table["include-tests"] == "NDR":
                                result = tst_data["throughput"]["NDR"]["LOWER"]
                            else:
                                result = None
                            if result is not None:
                                tbl_dict[tst_name_mod]["ref-data"].append(
                                    result)
                    except (KeyError, TypeError):
                        continue

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        # Reference (NDR side): mean and stdev in Mpps.
        data_r = tbl_dict[tst_name]["ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        # Compare (SOAK side): mean and stdev in Mpps.
        data_c = tbl_dict[tst_name]["cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # Only rows with both means present (and non-zero) make the table.
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
915
916
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    For every test with at least two samples the trend, the short- and
    long-term relative changes and the numbers of detected regressions and
    progressions are written to a csv file and a pretty-printed txt file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Collect per-test results: build -> receive rate.
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        if len(data_t) < 2:
            # At least two samples are needed to evaluate a trend.
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # The long-term window holds no valid (non-NaN) samples.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last average vs the average one window ago.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        # Long-term change: last average vs the long-window maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip tests with incomplete data.  This single check also
            # covers the former redundant "both changes are NaN" test,
            # which was fully subsumed by this disjunction.
            if (isnan(last_avg) or
                    isnan(rel_change_last) or
                    isnan(rel_change_long)):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Multi-key ordering: regressions desc, then progressions desc, then
    # short-term change asc (the name sort above keeps ties stable).
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1035
1036
1037 def _generate_url(base, testbed, test_name):
1038     """Generate URL to a trending plot from the name of the test case.
1039
1040     :param base: The base part of URL common to all test cases.
1041     :param testbed: The testbed used for testing.
1042     :param test_name: The name of the test case.
1043     :type base: str
1044     :type testbed: str
1045     :type test_name: str
1046     :returns: The URL to the plot with the trending data for the given test
1047         case.
1048     :rtype str
1049     """
1050
1051     url = base
1052     file_name = ""
1053     anchor = ".html#"
1054     feature = ""
1055
1056     if "lbdpdk" in test_name or "lbvpp" in test_name:
1057         file_name = "link_bonding"
1058
1059     elif "114b" in test_name and "vhost" in test_name:
1060         file_name = "vts"
1061
1062     elif "testpmd" in test_name or "l3fwd" in test_name:
1063         file_name = "dpdk"
1064
1065     elif "memif" in test_name:
1066         file_name = "container_memif"
1067         feature = "-base"
1068
1069     elif "srv6" in test_name:
1070         file_name = "srv6"
1071
1072     elif "vhost" in test_name:
1073         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
1074             file_name = "vm_vhost_l2"
1075             if "114b" in test_name:
1076                 feature = ""
1077             elif "l2xcbase" in test_name and "x520" in test_name:
1078                 feature = "-base-l2xc"
1079             elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
1080                 feature = "-base-l2bd"
1081             else:
1082                 feature = "-base"
1083         elif "ip4base" in test_name:
1084             file_name = "vm_vhost_ip4"
1085             feature = "-base"
1086
1087     elif "ipsecbasetnlsw" in test_name:
1088         file_name = "ipsecsw"
1089         feature = "-base-scale"
1090
1091     elif "ipsec" in test_name:
1092         file_name = "ipsec"
1093         feature = "-base-scale"
1094         if "hw-" in test_name:
1095             file_name = "ipsechw"
1096         elif "sw-" in test_name:
1097             file_name = "ipsecsw"
1098
1099     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
1100         file_name = "ip4_tunnels"
1101         feature = "-base"
1102
1103     elif "ip4base" in test_name or "ip4scale" in test_name:
1104         file_name = "ip4"
1105         if "xl710" in test_name:
1106             feature = "-base-scale-features"
1107         elif "iacl" in test_name:
1108             feature = "-features-iacl"
1109         elif "oacl" in test_name:
1110             feature = "-features-oacl"
1111         elif "snat" in test_name or "cop" in test_name:
1112             feature = "-features"
1113         else:
1114             feature = "-base-scale"
1115
1116     elif "ip6base" in test_name or "ip6scale" in test_name:
1117         file_name = "ip6"
1118         feature = "-base-scale"
1119
1120     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
1121             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
1122             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
1123         file_name = "l2"
1124         if "macip" in test_name:
1125             feature = "-features-macip"
1126         elif "iacl" in test_name:
1127             feature = "-features-iacl"
1128         elif "oacl" in test_name:
1129             feature = "-features-oacl"
1130         else:
1131             feature = "-base-scale"
1132
1133     if "x520" in test_name:
1134         nic = "x520-"
1135     elif "x710" in test_name:
1136         nic = "x710-"
1137     elif "xl710" in test_name:
1138         nic = "xl710-"
1139     elif "xxv710" in test_name:
1140         nic = "xxv710-"
1141     elif "vic1227" in test_name:
1142         nic = "vic1227-"
1143     elif "vic1385" in test_name:
1144         nic = "vic1385-"
1145     else:
1146         nic = ""
1147     anchor += nic
1148
1149     if "64b" in test_name:
1150         framesize = "64b"
1151     elif "78b" in test_name:
1152         framesize = "78b"
1153     elif "imix" in test_name:
1154         framesize = "imix"
1155     elif "9000b" in test_name:
1156         framesize = "9000b"
1157     elif "1518b" in test_name:
1158         framesize = "1518b"
1159     elif "114b" in test_name:
1160         framesize = "114b"
1161     else:
1162         framesize = ""
1163     anchor += framesize + '-'
1164
1165     if "1t1c" in test_name:
1166         anchor += "1t1c"
1167     elif "2t2c" in test_name:
1168         anchor += "2t2c"
1169     elif "4t4c" in test_name:
1170         anchor += "4t4c"
1171     elif "2t1c" in test_name:
1172         anchor += "2t1c"
1173     elif "4t2c" in test_name:
1174         anchor += "4t2c"
1175     elif "8t4c" in test_name:
1176         anchor += "8t4c"
1177
1178     return url + file_name + '-' + testbed + '-' + nic + framesize + \
1179         feature.replace("-int", "").replace("-tnl", "") + anchor + feature
1180
1181
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads a csv file (presumably the output of
    table_performance_trending_dashboard -- columns 4 and 5 are treated as
    regression / progression counts) and renders it as an HTML table with
    color-coded rows and links to the trending plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        # NOTE(review): binary mode is the Python 2 csv convention; the
        # whole file is materialized so rows can be iterated twice.
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows:
    # Each state maps to a pair of backgrounds alternated between odd and
    # even rows.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        # Non-zero regression count wins over progression.
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        else:
            color = "normal"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name:
            if c_idx == 0:
                # The first column is the test name; link it to the
                # corresponding trending plot.
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        # Emit the table as an rST "raw html" directive.
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
1258
1259
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warn("    No data for the {0} '{1}'.".
                     format(table.get("type", ""), table.get("title", "")))
        return

    # Output is a flat list of lines: for each build its number and the
    # DUT version, followed by one line per failed test of that build.
    tbl_list = list()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get("version", "")
            except KeyError:
                logging.error("Data for {job}: {build} is not present.".
                              format(job=job, build=build))
                return
            tbl_list.extend((build, version))
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_data["status"] != "FAIL":
                    continue
                found = re.search(REGEX_NIC, tst_data["parent"])
                if not found:
                    continue
                tbl_list.append(
                    "{0}-{1}".format(found.group(0), tst_data["name"]))

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        for line in tbl_list:
            file_handler.write(line + '\n')
1309
1310
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification. Only results generated within the configured time
    # window (default: the last 7 days) are considered.

    now = dt.utcnow()
    timeperiod = timedelta(int(table.get("window", 7)))

    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        # NIC cannot be determined from the parent suite
                        # name; skip this test.
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    generated = input_data.metadata(job, build).\
                        get("generated", "")
                    if not generated:
                        continue
                    then = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name]["data"][build] = (
                            tst_data["status"],
                            generated,
                            input_data.metadata(job, build).get("version", ""),
                            build)
                except (TypeError, KeyError) as err:
                    logging.warning("tst_name: {} - err: {}".
                                    format(tst_name, repr(err)))

    # Count failures per test and remember the most recent failure.
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        # Initialize explicitly so values never leak from a previously
        # processed test.
        fails_last_date = fails_last_vpp = fails_last_csit = None
        for val in tst_data["data"].values():
            if val[0] == "FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    # Sort by number of failures (descending), ties broken by the date of
    # the last failure (descending). The "%Y%m%d %H:%M" date strings compare
    # lexicographically in chronological order, so a single composite-key
    # sort replaces the former sort-then-bucket pass in O(n log n).
    tbl_sorted = sorted(tbl_lst, key=lambda itm: (itm[1], itm[2]),
                        reverse=True)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1407
1408
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # An empty input file would make csv_lst[0] below raise an uncaught
    # IndexError; handle it the same way as the other input problems.
    if not csv_lst:
        logging.warning("The input file '{0}' is empty.".
                        format(table["input-file"]))
        return

    # Table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header (first column left-aligned, the rest centered):
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows, with alternating background colors:
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name: link the test name to its trending graph.
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return