PAL: list all sel tests in table_performance_comparison
csit.git: resources/tools/presentation/generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
28
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30     convert_csv_to_pretty_txt, relative_change_stdev
31
32
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
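# Illustrative note (not part of the original file): REGEX_NIC is used below to
# pull the NIC token out of suite/test names. Assuming CSIT-style naming, a
# hypothetical parent suite name containing "10ge2p1x520" would match, e.g.:
#
#     >>> re.search(REGEX_NIC, "10ge2p1x520-ethip4-ip4base-ndrpdr").group(0)
#     '10ge2p1x520'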
34
35
36 def generate_tables(spec, data):
37     """Generate all tables specified in the specification file.
38
39     :param spec: Specification read from the specification file.
40     :param data: Data to process.
41     :type spec: Specification
42     :type data: InputData
43     """
44
45     logging.info("Generating the tables ...")
46     for table in spec.tables:
47         try:
48             eval(table["algorithm"])(table, data)
49         except NameError as err:
50             logging.error("The algorithm '{alg}' is probably not defined: {err}".
51                           format(alg=table["algorithm"], err=repr(err)))
52     logging.info("Done.")
53
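# Illustrative sketch (not part of the original file): generate_tables() above
# resolves table["algorithm"] with eval() and calls the matching function in
# this module, so the algorithm string in the specification must be the name of
# a function defined here. A minimal table entry, with hypothetical titles,
# field names and job/build values, might look like:
#
#     {
#         "algorithm": "table_details",
#         "title": "Detailed Test Results",
#         "type": "table",
#         "output-file": "detailed_test_results",
#         "output-file-ext": ".csv",
#         "columns": [{"title": "Name", "data": "data name"},
#                     {"title": "Configuration", "data": "data conf-history"}],
#         "data": {"csit-vpp-perf-mrr-daily-master": [1, 2]}
#     }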
54
55 def table_details(table, input_data):
56     """Generate the table(s) with algorithm: table_details
57     specified in the specification file.
58
59     :param table: Table to generate.
60     :param input_data: Data to process.
61     :type table: pandas.Series
62     :type input_data: InputData
63     """
64
65     logging.info("  Generating the table {0} ...".
66                  format(table.get("title", "")))
67
68     # Transform the data
69     logging.info("    Creating the data set for the {0} '{1}'.".
70                  format(table.get("type", ""), table.get("title", "")))
71     data = input_data.filter_data(table)
72
73     # Prepare the header of the tables
74     header = list()
75     for column in table["columns"]:
76         header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
77
78     # Generate the data for the table according to the model in the table
79     # specification
80     job = table["data"].keys()[0]
81     build = str(table["data"][job][0])
82     try:
83         suites = input_data.suites(job, build)
84     except KeyError:
85         logging.error("    No data available. The table will not be generated.")
86         return
87
88     for suite_longname, suite in suites.iteritems():
89         # Generate data
90         suite_name = suite["name"]
91         table_lst = list()
92         for test in data[job][build].keys():
93             if data[job][build][test]["parent"] in suite_name:
94                 row_lst = list()
95                 for column in table["columns"]:
96                     try:
97                         col_data = str(data[job][build][test][column["data"].
98                                        split(" ")[1]]).replace('"', '""')
99                         if column["data"].split(" ")[1] in ("conf-history",
100                                                             "show-run"):
101                             col_data = replace(col_data, " |br| ", "",
102                                                maxreplace=1)
103                             col_data = " |prein| {0} |preout| ".\
104                                 format(col_data[:-5])
105                         row_lst.append('"{0}"'.format(col_data))
106                     except KeyError:
107                         row_lst.append("No data")
108                 table_lst.append(row_lst)
109
110         # Write the data to file
111         if table_lst:
112             file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
113                                             table["output-file-ext"])
114             logging.info("      Writing file: '{}'".format(file_name))
115             with open(file_name, "w") as file_handler:
116                 file_handler.write(",".join(header) + "\n")
117                 for item in table_lst:
118                     file_handler.write(",".join(item) + "\n")
119
120     logging.info("  Done.")
121
122
123 def table_merged_details(table, input_data):
124     """Generate the table(s) with algorithm: table_merged_details
125     specified in the specification file.
126
127     :param table: Table to generate.
128     :param input_data: Data to process.
129     :type table: pandas.Series
130     :type input_data: InputData
131     """
132
133     logging.info("  Generating the table {0} ...".
134                  format(table.get("title", "")))
135
136     # Transform the data
137     logging.info("    Creating the data set for the {0} '{1}'.".
138                  format(table.get("type", ""), table.get("title", "")))
139     data = input_data.filter_data(table)
140     data = input_data.merge_data(data)
141     data.sort_index(inplace=True)
142
143     logging.info("    Creating the suites data set for the {0} '{1}'.".
144                  format(table.get("type", ""), table.get("title", "")))
145     suites = input_data.filter_data(table, data_set="suites")
146     suites = input_data.merge_data(suites)
147
148     # Prepare the header of the tables
149     header = list()
150     for column in table["columns"]:
151         header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
152
153     for _, suite in suites.iteritems():
154         # Generate data
155         suite_name = suite["name"]
156         table_lst = list()
157         for test in data.keys():
158             if data[test]["parent"] in suite_name:
159                 row_lst = list()
160                 for column in table["columns"]:
161                     try:
162                         col_data = str(data[test][column["data"].
163                                        split(" ")[1]]).replace('"', '""')
164                         col_data = replace(col_data, "No Data",
165                                            "Not Captured     ")
166                         if column["data"].split(" ")[1] in ("conf-history",
167                                                             "show-run"):
168                             col_data = replace(col_data, " |br| ", "",
169                                                maxreplace=1)
170                             col_data = " |prein| {0} |preout| ".\
171                                 format(col_data[:-5])
172                         row_lst.append('"{0}"'.format(col_data))
173                     except KeyError:
174                         row_lst.append('"Not captured"')
175                 table_lst.append(row_lst)
176
177         # Write the data to file
178         if table_lst:
179             file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
180                                             table["output-file-ext"])
181             logging.info("      Writing file: '{}'".format(file_name))
182             with open(file_name, "w") as file_handler:
183                 file_handler.write(",".join(header) + "\n")
184                 for item in table_lst:
185                     file_handler.write(",".join(item) + "\n")
186
187     logging.info("  Done.")
188
189
190 def table_performance_comparison(table, input_data):
191     """Generate the table(s) with algorithm: table_performance_comparison
192     specified in the specification file.
193
194     :param table: Table to generate.
195     :param input_data: Data to process.
196     :type table: pandas.Series
197     :type input_data: InputData
198     """
199
200     logging.info("  Generating the table {0} ...".
201                  format(table.get("title", "")))
202
203     # Transform the data
204     logging.info("    Creating the data set for the {0} '{1}'.".
205                  format(table.get("type", ""), table.get("title", "")))
206     data = input_data.filter_data(table, continue_on_error=True)
207
208     # Prepare the header of the tables
209     try:
210         header = ["Test case", ]
211
212         if table["include-tests"] == "MRR":
213             hdr_param = "Receive Rate"
214         else:
215             hdr_param = "Throughput"
216
217         history = table.get("history", None)
218         if history:
219             for item in history:
220                 header.extend(
221                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
222                      "{0} Stdev [Mpps]".format(item["title"])])
223         header.extend(
224             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
225              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
226              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
227              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
228              "Delta [%]"])
229         header_str = ",".join(header) + "\n"
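        # Illustrative example (not part of the original file): with hypothetical
        # reference/compare titles "rls1901" and "rls1904", no "history" entries
        # and "include-tests" other than MRR, header_str is the single CSV line
        #   Test case,rls1901 Throughput [Mpps],rls1901 Stdev [Mpps],
        #   rls1904 Throughput [Mpps],rls1904 Stdev [Mpps],Delta [%]
        # (wrapped here for readability).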
230     except (AttributeError, KeyError) as err:
231         logging.error("The model is invalid, missing parameter: {0}".
232                       format(err))
233         return
234
235     # Prepare data to the table:
236     tbl_dict = dict()
237     for job, builds in table["reference"]["data"].items():
238         for build in builds:
239             for tst_name, tst_data in data[job][str(build)].iteritems():
240                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
241                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
242                     replace("-ndrdisc", "").replace("-pdr", "").\
243                     replace("-ndr", "").\
244                     replace("1t1c", "1c").replace("2t1c", "1c").\
245                     replace("2t2c", "2c").replace("4t2c", "2c").\
246                     replace("4t4c", "4c").replace("8t4c", "4c")
247                 if "across topologies" in table["title"].lower():
248                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
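                # Illustrative example (not part of the original file): the chain
                # of replace() calls above drops the test-type suffix and collapses
                # the thread/core tag, so a hypothetical test name such as
                # "tc01-64b-1t1c-ethip4-ip4base-ndrpdr" is keyed here as
                # "tc01-64b-1c-ethip4-ip4base".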
249                 if tbl_dict.get(tst_name_mod, None) is None:
250                     groups = re.search(REGEX_NIC, tst_data["parent"])
251                     nic = groups.group(0) if groups else ""
252                     name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
253                                                           split("-")[:-1]))
254                     if "across testbeds" in table["title"].lower() or \
255                             "across topologies" in table["title"].lower():
256                         name = name.\
257                             replace("1t1c", "1c").replace("2t1c", "1c").\
258                             replace("2t2c", "2c").replace("4t2c", "2c").\
259                             replace("4t4c", "4c").replace("8t4c", "4c")
260                     tbl_dict[tst_name_mod] = {"name": name,
261                                               "ref-data": list(),
262                                               "cmp-data": list()}
263                 try:
264                     # TODO: Re-work when NDRPDRDISC tests are not used
265                     if table["include-tests"] == "MRR":
266                         tbl_dict[tst_name_mod]["ref-data"]. \
267                             append(tst_data["result"]["receive-rate"].avg)
268                     elif table["include-tests"] == "PDR":
269                         if tst_data["type"] == "PDR":
270                             tbl_dict[tst_name_mod]["ref-data"]. \
271                                 append(tst_data["throughput"]["value"])
272                         elif tst_data["type"] == "NDRPDR":
273                             tbl_dict[tst_name_mod]["ref-data"].append(
274                                 tst_data["throughput"]["PDR"]["LOWER"])
275                     elif table["include-tests"] == "NDR":
276                         if tst_data["type"] == "NDR":
277                             tbl_dict[tst_name_mod]["ref-data"]. \
278                                 append(tst_data["throughput"]["value"])
279                         elif tst_data["type"] == "NDRPDR":
280                             tbl_dict[tst_name_mod]["ref-data"].append(
281                                 tst_data["throughput"]["NDR"]["LOWER"])
282                     else:
283                         continue
284                 except TypeError:
285                     pass  # No data in output.xml for this test
286
287     for job, builds in table["compare"]["data"].items():
288         for build in builds:
289             for tst_name, tst_data in data[job][str(build)].iteritems():
290                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
291                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
292                     replace("-ndrdisc", "").replace("-pdr", ""). \
293                     replace("-ndr", "").\
294                     replace("1t1c", "1c").replace("2t1c", "1c").\
295                     replace("2t2c", "2c").replace("4t2c", "2c").\
296                     replace("4t4c", "4c").replace("8t4c", "4c")
297                 if "across topologies" in table["title"].lower():
298                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
299                 if tbl_dict.get(tst_name_mod, None) is None:
300                     groups = re.search(REGEX_NIC, tst_data["parent"])
301                     nic = groups.group(0) if groups else ""
302                     name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
303                                                           split("-")[:-1]))
304                     if "across testbeds" in table["title"].lower() or \
305                             "across topologies" in table["title"].lower():
306                         name = name.\
307                             replace("1t1c", "1c").replace("2t1c", "1c").\
308                             replace("2t2c", "2c").replace("4t2c", "2c").\
309                             replace("4t4c", "4c").replace("8t4c", "4c")
310                     tbl_dict[tst_name_mod] = {"name": name,
311                                               "ref-data": list(),
312                                               "cmp-data": list()}
313                 try:
314                     # TODO: Re-work when NDRPDRDISC tests are not used
315                     if table["include-tests"] == "MRR":
316                         tbl_dict[tst_name_mod]["cmp-data"]. \
317                             append(tst_data["result"]["receive-rate"].avg)
318                     elif table["include-tests"] == "PDR":
319                         if tst_data["type"] == "PDR":
320                             tbl_dict[tst_name_mod]["cmp-data"]. \
321                                 append(tst_data["throughput"]["value"])
322                         elif tst_data["type"] == "NDRPDR":
323                             tbl_dict[tst_name_mod]["cmp-data"].append(
324                                 tst_data["throughput"]["PDR"]["LOWER"])
325                     elif table["include-tests"] == "NDR":
326                         if tst_data["type"] == "NDR":
327                             tbl_dict[tst_name_mod]["cmp-data"]. \
328                                 append(tst_data["throughput"]["value"])
329                         elif tst_data["type"] == "NDRPDR":
330                             tbl_dict[tst_name_mod]["cmp-data"].append(
331                                 tst_data["throughput"]["NDR"]["LOWER"])
332                     else:
333                         continue
334                 except (KeyError, TypeError):
335                     pass
336     if history:
337         for item in history:
338             for job, builds in item["data"].items():
339                 for build in builds:
340                     for tst_name, tst_data in data[job][str(build)].iteritems():
341                         tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
342                             replace("-ndrpdr", "").replace("-pdrdisc", ""). \
343                             replace("-ndrdisc", "").replace("-pdr", ""). \
344                             replace("-ndr", "").\
345                             replace("1t1c", "1c").replace("2t1c", "1c").\
346                             replace("2t2c", "2c").replace("4t2c", "2c").\
347                             replace("4t4c", "4c").replace("8t4c", "4c")
348                         if "across topologies" in table["title"].lower():
349                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
350                         if tbl_dict.get(tst_name_mod, None) is None:
351                             continue
352                         if tbl_dict[tst_name_mod].get("history", None) is None:
353                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
354                         if tbl_dict[tst_name_mod]["history"].get(item["title"],
355                                                              None) is None:
356                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
357                                 list()
358                         try:
359                             # TODO: Re-work when NDRPDRDISC tests are not used
360                             if table["include-tests"] == "MRR":
361                                 tbl_dict[tst_name_mod]["history"][
362                                     item["title"]].append(
363                                         tst_data["result"]["receive-rate"].avg)
364                             elif table["include-tests"] == "PDR":
365                                 if tst_data["type"] == "PDR":
366                                     tbl_dict[tst_name_mod]["history"][
367                                         item["title"]].\
368                                         append(tst_data["throughput"]["value"])
369                                 elif tst_data["type"] == "NDRPDR":
370                                     tbl_dict[tst_name_mod]["history"][item[
371                                         "title"]].append(tst_data["throughput"][
372                                         "PDR"]["LOWER"])
373                             elif table["include-tests"] == "NDR":
374                                 if tst_data["type"] == "NDR":
375                                     tbl_dict[tst_name_mod]["history"][
376                                         item["title"]].\
377                                         append(tst_data["throughput"]["value"])
378                                 elif tst_data["type"] == "NDRPDR":
379                                     tbl_dict[tst_name_mod]["history"][item[
380                                         "title"]].append(tst_data["throughput"][
381                                         "NDR"]["LOWER"])
382                             else:
383                                 continue
384                         except (TypeError, KeyError):
385                             pass
386
387     tbl_lst = list()
388     for tst_name in tbl_dict.keys():
389         item = [tbl_dict[tst_name]["name"], ]
390         if history:
391             if tbl_dict[tst_name].get("history", None) is not None:
392                 for hist_data in tbl_dict[tst_name]["history"].values():
393                     if hist_data:
394                         item.append(round(mean(hist_data) / 1000000, 2))
395                         item.append(round(stdev(hist_data) / 1000000, 2))
396                     else:
397                         item.extend([None, None])
398             else:
399                 item.extend([None, None])
400         data_t = tbl_dict[tst_name]["ref-data"]
401         if data_t:
402             item.append(round(mean(data_t) / 1000000, 2))
403             item.append(round(stdev(data_t) / 1000000, 2))
404         else:
405             item.extend([None, None])
406         data_t = tbl_dict[tst_name]["cmp-data"]
407         if data_t:
408             item.append(round(mean(data_t) / 1000000, 2))
409             item.append(round(stdev(data_t) / 1000000, 2))
410         else:
411             item.extend([None, None])
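        # Comment added for clarity: item[-4] is the reference mean and item[-2]
        # the compare mean, both already converted to Mpps above; the "Delta [%]"
        # column is their relative change as computed by relative_change()
        # (imported from utils).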
412         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
413             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
414         else:
415             item.append(None)
416         if len(item) == len(header):
417             tbl_lst.append(item)
418
419     # Sort the table according to the relative change
420     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
421
422     # Generate csv tables:
423     csv_file = "{0}.csv".format(table["output-file"])
424     with open(csv_file, "w") as file_handler:
425         file_handler.write(header_str)
426         for test in tbl_lst:
427             file_handler.write(",".join([str(item) for item in test]) + "\n")
428
429     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
430
431
432 def table_performance_comparison_nic(table, input_data):
433     """Generate the table(s) with algorithm: table_performance_comparison_nic
434     specified in the specification file.
435
436     :param table: Table to generate.
437     :param input_data: Data to process.
438     :type table: pandas.Series
439     :type input_data: InputData
440     """
441
442     logging.info("  Generating the table {0} ...".
443                  format(table.get("title", "")))
444
445     # Transform the data
446     logging.info("    Creating the data set for the {0} '{1}'.".
447                  format(table.get("type", ""), table.get("title", "")))
448     data = input_data.filter_data(table, continue_on_error=True)
449
450     # Prepare the header of the tables
451     try:
452         header = ["Test case", ]
453
454         if table["include-tests"] == "MRR":
455             hdr_param = "Receive Rate"
456         else:
457             hdr_param = "Throughput"
458
459         history = table.get("history", None)
460         if history:
461             for item in history:
462                 header.extend(
463                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
464                      "{0} Stdev [Mpps]".format(item["title"])])
465         header.extend(
466             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
467              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
468              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
469              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
470              "Delta [%]"])
471         header_str = ",".join(header) + "\n"
472     except (AttributeError, KeyError) as err:
473         logging.error("The model is invalid, missing parameter: {0}".
474                       format(err))
475         return
476
477     # Prepare data to the table:
478     tbl_dict = dict()
479     for job, builds in table["reference"]["data"].items():
480         for build in builds:
481             for tst_name, tst_data in data[job][str(build)].iteritems():
482                 if table["reference"]["nic"] not in tst_data["tags"]:
483                     continue
484                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
485                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
486                     replace("-ndrdisc", "").replace("-pdr", "").\
487                     replace("-ndr", "").\
488                     replace("1t1c", "1c").replace("2t1c", "1c").\
489                     replace("2t2c", "2c").replace("4t2c", "2c").\
490                     replace("4t4c", "4c").replace("8t4c", "4c")
491                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
492                 if "across topologies" in table["title"].lower():
493                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
494                 if tbl_dict.get(tst_name_mod, None) is None:
495                     name = "{0}".format("-".join(tst_data["name"].
496                                                  split("-")[:-1]))
497                     if "across testbeds" in table["title"].lower() or \
498                             "across topologies" in table["title"].lower():
499                         name = name.\
500                             replace("1t1c", "1c").replace("2t1c", "1c").\
501                             replace("2t2c", "2c").replace("4t2c", "2c").\
502                             replace("4t4c", "4c").replace("8t4c", "4c")
503                     tbl_dict[tst_name_mod] = {"name": name,
504                                               "ref-data": list(),
505                                               "cmp-data": list()}
506                 try:
507                     # TODO: Re-work when NDRPDRDISC tests are not used
508                     if table["include-tests"] == "MRR":
509                         tbl_dict[tst_name_mod]["ref-data"]. \
510                             append(tst_data["result"]["receive-rate"].avg)
511                     elif table["include-tests"] == "PDR":
512                         if tst_data["type"] == "PDR":
513                             tbl_dict[tst_name_mod]["ref-data"]. \
514                                 append(tst_data["throughput"]["value"])
515                         elif tst_data["type"] == "NDRPDR":
516                             tbl_dict[tst_name_mod]["ref-data"].append(
517                                 tst_data["throughput"]["PDR"]["LOWER"])
518                     elif table["include-tests"] == "NDR":
519                         if tst_data["type"] == "NDR":
520                             tbl_dict[tst_name_mod]["ref-data"]. \
521                                 append(tst_data["throughput"]["value"])
522                         elif tst_data["type"] == "NDRPDR":
523                             tbl_dict[tst_name_mod]["ref-data"].append(
524                                 tst_data["throughput"]["NDR"]["LOWER"])
525                     else:
526                         continue
527                 except TypeError:
528                     pass  # No data in output.xml for this test
529
530     for job, builds in table["compare"]["data"].items():
531         for build in builds:
532             for tst_name, tst_data in data[job][str(build)].iteritems():
533                 if table["compare"]["nic"] not in tst_data["tags"]:
534                     continue
535                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
536                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
537                     replace("-ndrdisc", "").replace("-pdr", ""). \
538                     replace("-ndr", "").\
539                     replace("1t1c", "1c").replace("2t1c", "1c").\
540                     replace("2t2c", "2c").replace("4t2c", "2c").\
541                     replace("4t4c", "4c").replace("8t4c", "4c")
542                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
543                 if "across topologies" in table["title"].lower():
544                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
545                 if tbl_dict.get(tst_name_mod, None) is None:
546                     name = "{0}".format("-".join(tst_data["name"].
547                                                  split("-")[:-1]))
548                     if "across testbeds" in table["title"].lower() or \
549                             "across topologies" in table["title"].lower():
550                         name = name.\
551                             replace("1t1c", "1c").replace("2t1c", "1c").\
552                             replace("2t2c", "2c").replace("4t2c", "2c").\
553                             replace("4t4c", "4c").replace("8t4c", "4c")
554                     tbl_dict[tst_name_mod] = {"name": name,
555                                               "ref-data": list(),
556                                               "cmp-data": list()}
557                 try:
558                     # TODO: Re-work when NDRPDRDISC tests are not used
559                     if table["include-tests"] == "MRR":
560                         tbl_dict[tst_name_mod]["cmp-data"]. \
561                             append(tst_data["result"]["receive-rate"].avg)
562                     elif table["include-tests"] == "PDR":
563                         if tst_data["type"] == "PDR":
564                             tbl_dict[tst_name_mod]["cmp-data"]. \
565                                 append(tst_data["throughput"]["value"])
566                         elif tst_data["type"] == "NDRPDR":
567                             tbl_dict[tst_name_mod]["cmp-data"].append(
568                                 tst_data["throughput"]["PDR"]["LOWER"])
569                     elif table["include-tests"] == "NDR":
570                         if tst_data["type"] == "NDR":
571                             tbl_dict[tst_name_mod]["cmp-data"]. \
572                                 append(tst_data["throughput"]["value"])
573                         elif tst_data["type"] == "NDRPDR":
574                             tbl_dict[tst_name_mod]["cmp-data"].append(
575                                 tst_data["throughput"]["NDR"]["LOWER"])
576                     else:
577                         continue
578                 except (KeyError, TypeError):
579                     pass
580
581     if history:
582         for item in history:
583             for job, builds in item["data"].items():
584                 for build in builds:
585                     for tst_name, tst_data in data[job][str(build)].iteritems():
586                         if item["nic"] not in tst_data["tags"]:
587                             continue
588                         tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
589                             replace("-ndrpdr", "").replace("-pdrdisc", ""). \
590                             replace("-ndrdisc", "").replace("-pdr", ""). \
591                             replace("-ndr", "").\
592                             replace("1t1c", "1c").replace("2t1c", "1c").\
593                             replace("2t2c", "2c").replace("4t2c", "2c").\
594                             replace("4t4c", "4c").replace("8t4c", "4c")
595                         tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
596                         if "across topologies" in table["title"].lower():
597                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
598                         if tbl_dict.get(tst_name_mod, None) is None:
599                             continue
600                         if tbl_dict[tst_name_mod].get("history", None) is None:
601                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
602                         if tbl_dict[tst_name_mod]["history"].get(item["title"],
603                                                              None) is None:
604                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
605                                 list()
606                         try:
607                             # TODO: Re-work when NDRPDRDISC tests are not used
608                             if table["include-tests"] == "MRR":
609                                 tbl_dict[tst_name_mod]["history"][
610                                     item["title"]].append(
611                                         tst_data["result"]["receive-rate"].avg)
612                             elif table["include-tests"] == "PDR":
613                                 if tst_data["type"] == "PDR":
614                                     tbl_dict[tst_name_mod]["history"][
615                                         item["title"]].\
616                                         append(tst_data["throughput"]["value"])
617                                 elif tst_data["type"] == "NDRPDR":
618                                     tbl_dict[tst_name_mod]["history"][item[
619                                         "title"]].append(tst_data["throughput"][
620                                         "PDR"]["LOWER"])
621                             elif table["include-tests"] == "NDR":
622                                 if tst_data["type"] == "NDR":
623                                     tbl_dict[tst_name_mod]["history"][
624                                         item["title"]].\
625                                         append(tst_data["throughput"]["value"])
626                                 elif tst_data["type"] == "NDRPDR":
627                                     tbl_dict[tst_name_mod]["history"][item[
628                                         "title"]].append(tst_data["throughput"][
629                                         "NDR"]["LOWER"])
630                             else:
631                                 continue
632                         except (TypeError, KeyError):
633                             pass
634
635     tbl_lst = list()
636     for tst_name in tbl_dict.keys():
637         item = [tbl_dict[tst_name]["name"], ]
638         if history:
639             if tbl_dict[tst_name].get("history", None) is not None:
640                 for hist_data in tbl_dict[tst_name]["history"].values():
641                     if hist_data:
642                         item.append(round(mean(hist_data) / 1000000, 2))
643                         item.append(round(stdev(hist_data) / 1000000, 2))
644                     else:
645                         item.extend([None, None])
646             else:
647                 item.extend([None, None])
648         data_t = tbl_dict[tst_name]["ref-data"]
649         if data_t:
650             item.append(round(mean(data_t) / 1000000, 2))
651             item.append(round(stdev(data_t) / 1000000, 2))
652         else:
653             item.extend([None, None])
654         data_t = tbl_dict[tst_name]["cmp-data"]
655         if data_t:
656             item.append(round(mean(data_t) / 1000000, 2))
657             item.append(round(stdev(data_t) / 1000000, 2))
658         else:
659             item.extend([None, None])
660         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
661             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
662         else:
663             item.append(None)
664         if len(item) == len(header):
665             tbl_lst.append(item)
666
667     # Sort the table according to the relative change
668     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
669
670     # Generate csv tables:
671     csv_file = "{0}.csv".format(table["output-file"])
672     with open(csv_file, "w") as file_handler:
673         file_handler.write(header_str)
674         for test in tbl_lst:
675             file_handler.write(",".join([str(item) for item in test]) + "\n")
676
677     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
678
679
680 def table_nics_comparison(table, input_data):
681     """Generate the table(s) with algorithm: table_nics_comparison
682     specified in the specification file.
683
684     :param table: Table to generate.
685     :param input_data: Data to process.
686     :type table: pandas.Series
687     :type input_data: InputData
688     """
689
690     logging.info("  Generating the table {0} ...".
691                  format(table.get("title", "")))
692
693     # Transform the data
694     logging.info("    Creating the data set for the {0} '{1}'.".
695                  format(table.get("type", ""), table.get("title", "")))
696     data = input_data.filter_data(table, continue_on_error=True)
697
698     # Prepare the header of the tables
699     try:
700         header = ["Test case", ]
701
702         if table["include-tests"] == "MRR":
703             hdr_param = "Receive Rate"
704         else:
705             hdr_param = "Throughput"
706
707         header.extend(
708             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
709              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
710              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
711              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
712              "Delta [%]"])
713         header_str = ",".join(header) + "\n"
714     except (AttributeError, KeyError) as err:
715         logging.error("The model is invalid, missing parameter: {0}".
716                       format(err))
717         return
718
719     # Prepare data to the table:
720     tbl_dict = dict()
721     for job, builds in table["data"].items():
722         for build in builds:
723             for tst_name, tst_data in data[job][str(build)].iteritems():
724                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
725                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
726                     replace("-ndrdisc", "").replace("-pdr", "").\
727                     replace("-ndr", "").\
728                     replace("1t1c", "1c").replace("2t1c", "1c").\
729                     replace("2t2c", "2c").replace("4t2c", "2c").\
730                     replace("4t4c", "4c").replace("8t4c", "4c")
731                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
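                # Note added for clarity: stripping the NIC token with REGEX_NIC
                # lets runs of the same test on different NICs (e.g. a
                # hypothetical x520 vs x710 pair) share one table row, so they
                # can be split into reference and compare data by the NIC tags
                # below.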
732                 if tbl_dict.get(tst_name_mod, None) is None:
733                     name = "-".join(tst_data["name"].split("-")[:-1])
734                     tbl_dict[tst_name_mod] = {"name": name,
735                                               "ref-data": list(),
736                                               "cmp-data": list()}
737                 try:
738                     if table["include-tests"] == "MRR":
739                         result = tst_data["result"]["receive-rate"].avg
740                     elif table["include-tests"] == "PDR":
741                         result = tst_data["throughput"]["PDR"]["LOWER"]
742                     elif table["include-tests"] == "NDR":
743                         result = tst_data["throughput"]["NDR"]["LOWER"]
744                     else:
745                         result = None
746
747                     if result:
748                         if table["reference"]["nic"] in tst_data["tags"]:
749                             tbl_dict[tst_name_mod]["ref-data"].append(result)
750                         elif table["compare"]["nic"] in tst_data["tags"]:
751                             tbl_dict[tst_name_mod]["cmp-data"].append(result)
752                 except (TypeError, KeyError) as err:
753                     logging.debug("No data for {0}".format(tst_name))
754                     logging.debug(repr(err))
755                     # No data in output.xml for this test
756
757     tbl_lst = list()
758     for tst_name in tbl_dict.keys():
759         item = [tbl_dict[tst_name]["name"], ]
760         data_t = tbl_dict[tst_name]["ref-data"]
761         if data_t:
762             item.append(round(mean(data_t) / 1000000, 2))
763             item.append(round(stdev(data_t) / 1000000, 2))
764         else:
765             item.extend([None, None])
766         data_t = tbl_dict[tst_name]["cmp-data"]
767         if data_t:
768             item.append(round(mean(data_t) / 1000000, 2))
769             item.append(round(stdev(data_t) / 1000000, 2))
770         else:
771             item.extend([None, None])
772         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
773             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
774         if len(item) == len(header):
775             tbl_lst.append(item)
776
777     # Sort the table according to the relative change
778     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
779
780     # Generate csv tables:
781     csv_file = "{0}.csv".format(table["output-file"])
782     with open(csv_file, "w") as file_handler:
783         file_handler.write(header_str)
784         for test in tbl_lst:
785             file_handler.write(",".join([str(item) for item in test]) + "\n")
786
787     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
788
789
790 def table_soak_vs_ndr(table, input_data):
791     """Generate the table(s) with algorithm: table_soak_vs_ndr
792     specified in the specification file.
793
794     :param table: Table to generate.
795     :param input_data: Data to process.
796     :type table: pandas.Series
797     :type input_data: InputData
798     """
799
800     logging.info("  Generating the table {0} ...".
801                  format(table.get("title", "")))
802
803     # Transform the data
804     logging.info("    Creating the data set for the {0} '{1}'.".
805                  format(table.get("type", ""), table.get("title", "")))
806     data = input_data.filter_data(table, continue_on_error=True)
807
808     # Prepare the header of the table
809     try:
810         header = [
811             "Test case",
812             "{0} Throughput [Mpps]".format(table["reference"]["title"]),
813             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
814             "{0} Throughput [Mpps]".format(table["compare"]["title"]),
815             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
816             "Delta [%]", "Stdev of delta [%]"]
817         header_str = ",".join(header) + "\n"
818     except (AttributeError, KeyError) as err:
819         logging.error("The model is invalid, missing parameter: {0}".
820                       format(err))
821         return
822
823     # Create a list of available SOAK test results:
824     tbl_dict = dict()
825     for job, builds in table["compare"]["data"].items():
826         for build in builds:
827             for tst_name, tst_data in data[job][str(build)].iteritems():
828                 if tst_data["type"] == "SOAK":
829                     tst_name_mod = tst_name.replace("-soak", "")
830                     if tbl_dict.get(tst_name_mod, None) is None:
831                         groups = re.search(REGEX_NIC, tst_data["parent"])
832                         nic = groups.group(0) if groups else ""
833                         name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
834                                                               split("-")[:-1]))
835                         tbl_dict[tst_name_mod] = {
836                             "name": name,
837                             "ref-data": list(),
838                             "cmp-data": list()
839                         }
840                     try:
841                         tbl_dict[tst_name_mod]["cmp-data"].append(
842                             tst_data["throughput"]["LOWER"])
843                     except (KeyError, TypeError):
844                         pass
845     tests_lst = tbl_dict.keys()
846
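    # Illustrative note (not part of the original file): SOAK and NDR results are
    # matched purely by stripping the test-type suffix, so a hypothetical
    # "64b-1c-ethip4-ip4base-soak" entry collected above pairs with
    # "64b-1c-ethip4-ip4base-ndrpdr" found in the reference data below.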
847     # Add corresponding NDR test results:
848     for job, builds in table["reference"]["data"].items():
849         for build in builds:
850             for tst_name, tst_data in data[job][str(build)].iteritems():
851                 tst_name_mod = tst_name.replace("-ndrpdr", "").\
852                     replace("-mrr", "")
853                 if tst_name_mod in tests_lst:
854                     try:
855                         if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
856                             if table["include-tests"] == "MRR":
857                                 result = tst_data["result"]["receive-rate"].avg
858                             elif table["include-tests"] == "PDR":
859                                 result = tst_data["throughput"]["PDR"]["LOWER"]
860                             elif table["include-tests"] == "NDR":
861                                 result = tst_data["throughput"]["NDR"]["LOWER"]
862                             else:
863                                 result = None
864                             if result is not None:
865                                 tbl_dict[tst_name_mod]["ref-data"].append(
866                                     result)
867                     except (KeyError, TypeError):
868                         continue
869
870     tbl_lst = list()
871     for tst_name in tbl_dict.keys():
872         item = [tbl_dict[tst_name]["name"], ]
873         data_r = tbl_dict[tst_name]["ref-data"]
874         if data_r:
875             data_r_mean = mean(data_r)
876             item.append(round(data_r_mean / 1000000, 2))
877             data_r_stdev = stdev(data_r)
878             item.append(round(data_r_stdev / 1000000, 2))
879         else:
880             data_r_mean = None
881             data_r_stdev = None
882             item.extend([None, None])
883         data_c = tbl_dict[tst_name]["cmp-data"]
884         if data_c:
885             data_c_mean = mean(data_c)
886             item.append(round(data_c_mean / 1000000, 2))
887             data_c_stdev = stdev(data_c)
888             item.append(round(data_c_stdev / 1000000, 2))
889         else:
890             data_c_mean = None
891             data_c_stdev = None
892             item.extend([None, None])
893         if data_r_mean and data_c_mean:
894             delta, d_stdev = relative_change_stdev(
895                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
896             item.append(round(delta, 2))
897             item.append(round(d_stdev, 2))
898             tbl_lst.append(item)
899
900     # Sort the table according to the relative change
901     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
902
903     # Generate csv tables:
904     csv_file = "{0}.csv".format(table["output-file"])
905     with open(csv_file, "w") as file_handler:
906         file_handler.write(header_str)
907         for test in tbl_lst:
908             file_handler.write(",".join([str(item) for item in test]) + "\n")
909
910     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
911
912
913 def table_performance_trending_dashboard(table, input_data):
914     """Generate the table(s) with algorithm:
915     table_performance_trending_dashboard
916     specified in the specification file.
917
918     :param table: Table to generate.
919     :param input_data: Data to process.
920     :type table: pandas.Series
921     :type input_data: InputData
922     """
923
924     logging.info("  Generating the table {0} ...".
925                  format(table.get("title", "")))
926
927     # Transform the data
928     logging.info("    Creating the data set for the {0} '{1}'.".
929                  format(table.get("type", ""), table.get("title", "")))
930     data = input_data.filter_data(table, continue_on_error=True)
931
932     # Prepare the header of the tables
933     header = ["Test Case",
934               "Trend [Mpps]",
935               "Short-Term Change [%]",
936               "Long-Term Change [%]",
937               "Regressions [#]",
938               "Progressions [#]"
939               ]
940     header_str = ",".join(header) + "\n"
941
942     # Prepare data to the table:
943     tbl_dict = dict()
944     for job, builds in table["data"].items():
945         for build in builds:
946             for tst_name, tst_data in data[job][str(build)].iteritems():
947                 if tst_name.lower() in table.get("ignore-list", list()):
948                     continue
949                 if tbl_dict.get(tst_name, None) is None:
950                     groups = re.search(REGEX_NIC, tst_data["parent"])
951                     if not groups:
952                         continue
953                     nic = groups.group(0)
954                     tbl_dict[tst_name] = {
955                         "name": "{0}-{1}".format(nic, tst_data["name"]),
956                         "data": OrderedDict()}
957                 try:
958                     tbl_dict[tst_name]["data"][str(build)] = \
959                         tst_data["result"]["receive-rate"]
960                 except (TypeError, KeyError):
961                     pass  # No data in output.xml for this test
962
963     tbl_lst = list()
964     for tst_name in tbl_dict.keys():
965         data_t = tbl_dict[tst_name]["data"]
966         if len(data_t) < 2:
967             continue
968
969         classification_lst, avgs = classify_anomalies(data_t)
970
971         win_size = min(len(data_t), table["window"])
972         long_win_size = min(len(data_t), table["long-trend-window"])
973
974         try:
975             max_long_avg = max(
976                 [x for x in avgs[-long_win_size:-win_size]
977                  if not isnan(x)])
978         except ValueError:
979             max_long_avg = nan
980         last_avg = avgs[-1]
981         avg_week_ago = avgs[max(-win_size, -len(avgs))]
982
983         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
984             rel_change_last = nan
985         else:
986             rel_change_last = round(
987                 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
988
989         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
990             rel_change_long = nan
991         else:
992             rel_change_long = round(
993                 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
994
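        # Worked example (added for illustration, values hypothetical): with
        # last_avg = 2.5e6 pps and avg_week_ago = 2.0e6 pps, rel_change_last is
        # round(((2.5e6 - 2.0e6) / 2.0e6) * 100, 2) == 25.0, i.e. a 25 % short-
        # term increase of the trend value against one window ago.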
995         if classification_lst:
996             if isnan(rel_change_last) and isnan(rel_change_long):
997                 continue
998             if (isnan(last_avg) or
999                 isnan(rel_change_last) or
1000                 isnan(rel_change_long)):
1001                 continue
1002             tbl_lst.append(
1003                 [tbl_dict[tst_name]["name"],
1004                  round(last_avg / 1000000, 2),
1005                  rel_change_last,
1006                  rel_change_long,
1007                  classification_lst[-win_size:].count("regression"),
1008                  classification_lst[-win_size:].count("progression")])
1009
1010     tbl_lst.sort(key=lambda rel: rel[0])
1011
1012     tbl_sorted = list()
1013     for nrr in range(table["window"], -1, -1):
1014         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1015         for nrp in range(table["window"], -1, -1):
1016             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1017             tbl_out.sort(key=lambda rel: rel[2])
1018             tbl_sorted.extend(tbl_out)
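    # Note added for clarity: the nested loops above order the dashboard rows by
    # the number of regressions (descending), then the number of progressions
    # (descending), then by short-term change (ascending), so the tests with the
    # most regressions end up at the top of the table.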
1019
1020     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1021
1022     logging.info("    Writing file: '{0}'".format(file_name))
1023     with open(file_name, "w") as file_handler:
1024         file_handler.write(header_str)
1025         for test in tbl_sorted:
1026             file_handler.write(",".join([str(item) for item in test]) + '\n')
1027
1028     txt_file_name = "{0}.txt".format(table["output-file"])
1029     logging.info("    Writing file: '{0}'".format(txt_file_name))
1030     convert_csv_to_pretty_txt(file_name, txt_file_name)
1031
1032
1033 def _generate_url(base, testbed, test_name):
1034     """Generate URL to a trending plot from the name of the test case.
1035
1036     :param base: The base part of URL common to all test cases.
1037     :param testbed: The testbed used for testing.
1038     :param test_name: The name of the test case.
1039     :type base: str
1040     :type testbed: str
1041     :type test_name: str
1042     :returns: The URL to the plot with the trending data for the given test
1043         case.
1044     :rtype: str
1045     """
1046
1047     url = base
1048     file_name = ""
1049     anchor = ".html#"
1050     feature = ""
1051
1052     if "lbdpdk" in test_name or "lbvpp" in test_name:
1053         file_name = "link_bonding"
1054
1055     elif "114b" in test_name and "vhost" in test_name:
1056         file_name = "vts"
1057
1058     elif "testpmd" in test_name or "l3fwd" in test_name:
1059         file_name = "dpdk"
1060
1061     elif "memif" in test_name:
1062         file_name = "container_memif"
1063         feature = "-base"
1064
1065     elif "srv6" in test_name:
1066         file_name = "srv6"
1067
1068     elif "vhost" in test_name:
1069         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
1070             file_name = "vm_vhost_l2"
1071             if "114b" in test_name:
1072                 feature = ""
1073             elif "l2xcbase" in test_name and "x520" in test_name:
1074                 feature = "-base-l2xc"
1075             elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
1076                 feature = "-base-l2bd"
1077             else:
1078                 feature = "-base"
1079         elif "ip4base" in test_name:
1080             file_name = "vm_vhost_ip4"
1081             feature = "-base"
1082
1083     elif "ipsecbasetnlsw" in test_name:
1084         file_name = "ipsecsw"
1085         feature = "-base-scale"
1086
1087     elif "ipsec" in test_name:
1088         file_name = "ipsec"
1089         feature = "-base-scale"
1090         if "hw-" in test_name:
1091             file_name = "ipsechw"
1092         elif "sw-" in test_name:
1093             file_name = "ipsecsw"
1094
1095     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
1096         file_name = "ip4_tunnels"
1097         feature = "-base"
1098
1099     elif "ip4base" in test_name or "ip4scale" in test_name:
1100         file_name = "ip4"
1101         if "xl710" in test_name:
1102             feature = "-base-scale-features"
1103         elif "iacl" in test_name:
1104             feature = "-features-iacl"
1105         elif "oacl" in test_name:
1106             feature = "-features-oacl"
1107         elif "snat" in test_name or "cop" in test_name:
1108             feature = "-features"
1109         else:
1110             feature = "-base-scale"
1111
1112     elif "ip6base" in test_name or "ip6scale" in test_name:
1113         file_name = "ip6"
1114         feature = "-base-scale"
1115
1116     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
1117             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
1118             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
1119         file_name = "l2"
1120         if "macip" in test_name:
1121             feature = "-features-macip"
1122         elif "iacl" in test_name:
1123             feature = "-features-iacl"
1124         elif "oacl" in test_name:
1125             feature = "-features-oacl"
1126         else:
1127             feature = "-base-scale"
1128
1129     if "x520" in test_name:
1130         nic = "x520-"
1131     elif "x710" in test_name:
1132         nic = "x710-"
1133     elif "xl710" in test_name:
1134         nic = "xl710-"
1135     elif "xxv710" in test_name:
1136         nic = "xxv710-"
1137     elif "vic1227" in test_name:
1138         nic = "vic1227-"
1139     elif "vic1385" in test_name:
1140         nic = "vic1385-"
1141     else:
1142         nic = ""
1143     anchor += nic
1144
1145     if "64b" in test_name:
1146         framesize = "64b"
1147     elif "78b" in test_name:
1148         framesize = "78b"
1149     elif "imix" in test_name:
1150         framesize = "imix"
1151     elif "9000b" in test_name:
1152         framesize = "9000b"
1153     elif "1518b" in test_name:
1154         framesize = "1518b"
1155     elif "114b" in test_name:
1156         framesize = "114b"
1157     else:
1158         framesize = ""
1159     anchor += framesize + '-'
1160
1161     if "1t1c" in test_name:
1162         anchor += "1t1c"
1163     elif "2t2c" in test_name:
1164         anchor += "2t2c"
1165     elif "4t4c" in test_name:
1166         anchor += "4t4c"
1167     elif "2t1c" in test_name:
1168         anchor += "2t1c"
1169     elif "4t2c" in test_name:
1170         anchor += "4t2c"
1171     elif "8t4c" in test_name:
1172         anchor += "8t4c"
1173
1174     return url + file_name + '-' + testbed + '-' + nic + framesize + \
1175         feature.replace("-int", "").replace("-tnl", "") + anchor + feature
1176
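# Illustrative example (not part of the original file): for a hypothetical test
# name "x520-64b-1t1c-ethip4-ip4base-mrr" and testbed "3n-hsw",
# _generate_url("../trending/", "3n-hsw", test_name) would return
# "../trending/ip4-3n-hsw-x520-64b-base-scale.html#x520-64b-1t1c-base-scale".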
1177
1178 def table_performance_trending_dashboard_html(table, input_data):
1179     """Generate the table(s) with algorithm:
1180     table_performance_trending_dashboard_html specified in the specification
1181     file.
1182
1183     :param table: Table to generate.
1184     :param input_data: Data to process.
1185     :type table: dict
1186     :type input_data: InputData
1187     """
1188
1189     testbed = table.get("testbed", None)
1190     if testbed is None:
1191         logging.error("The testbed is not defined for the table '{0}'.".
1192                       format(table.get("title", "")))
1193         return
1194
1195     logging.info("  Generating the table {0} ...".
1196                  format(table.get("title", "")))
1197
1198     try:
1199         with open(table["input-file"], 'rb') as csv_file:
1200             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1201             csv_lst = [item for item in csv_content]
1202     except KeyError:
1203         logging.warning("The input file is not defined.")
1204         return
1205     except csv.Error as err:
1206         logging.warning("Cannot process the file '{0}'.\n{1}".
1207                         format(table["input-file"], err))
1208         return
1209
1210     # Table:
1211     dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
1212
1213     # Table header:
1214     tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
1215     for idx, item in enumerate(csv_lst[0]):
1216         alignment = "left" if idx == 0 else "center"
1217         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
1218         th.text = item
1219
1220     # Rows:
1221     colors = {"regression": ("#ffcccc", "#ff9999"),
1222               "progression": ("#c6ecc6", "#9fdf9f"),
1223               "normal": ("#e9f1fb", "#d4e4f7")}
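    # Columns with index 4 and 5 of each CSV row are expected to hold the
    # regression and progression counters; a non-zero value selects the row
    # colour, alternating between the two shades of the chosen category.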
1224     for r_idx, row in enumerate(csv_lst[1:]):
1225         if int(row[4]):
1226             color = "regression"
1227         elif int(row[5]):
1228             color = "progression"
1229         else:
1230             color = "normal"
1231         background = colors[color][r_idx % 2]
1232         tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
1233
1234         # Columns:
1235         for c_idx, item in enumerate(row):
1236             alignment = "left" if c_idx == 0 else "center"
1237             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
1238             # Name:
1239             if c_idx == 0:
1240                 url = _generate_url("../trending/", testbed, item)
1241                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
1242                 ref.text = item
1243             else:
1244                 td.text = item
1245     try:
1246         with open(table["output-file"], 'w') as html_file:
1247             logging.info("    Writing file: '{0}'".format(table["output-file"]))
1248             html_file.write(".. raw:: html\n\n\t")
1249             html_file.write(ET.tostring(dashboard))
1250             html_file.write("\n\t<p><br><br></p>\n")
1251     except KeyError:
1252         logging.warning("The output file is not defined.")
1253         return
1254
1255
1256 def table_last_failed_tests(table, input_data):
1257     """Generate the table(s) with algorithm: table_last_failed_tests
1258     specified in the specification file.
1259
1260     :param table: Table to generate.
1261     :param input_data: Data to process.
1262     :type table: pandas.Series
1263     :type input_data: InputData
1264     """
1265
1266     logging.info("  Generating the table {0} ...".
1267                  format(table.get("title", "")))
1268
1269     # Transform the data
1270     logging.info("    Creating the data set for the {0} '{1}'.".
1271                  format(table.get("type", ""), table.get("title", "")))
1272     data = input_data.filter_data(table, continue_on_error=True)
1273
1274     if data is None or data.empty:
1275         logging.warning("    No data for the {0} '{1}'.".
1276                         format(table.get("type", ""), table.get("title", "")))
1277         return
1278
1279     tbl_list = list()
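    # The output is a flat list written one item per line: the build number,
    # the VPP version tested by that build, and then a "<nic>-<test name>"
    # entry for every test which failed in it.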
1280     for job, builds in table["data"].items():
1281         for build in builds:
1282             build = str(build)
1283             try:
1284                 version = input_data.metadata(job, build).get("version", "")
1285             except KeyError:
1286                 logging.error("Data for {job}: {build} is not present.".
1287                               format(job=job, build=build))
1288                 return
1289             tbl_list.append(build)
1290             tbl_list.append(version)
1291             for tst_name, tst_data in data[job][build].iteritems():
1292                 if tst_data["status"] != "FAIL":
1293                     continue
1294                 groups = re.search(REGEX_NIC, tst_data["parent"])
1295                 if not groups:
1296                     continue
1297                 nic = groups.group(0)
1298                 tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))
1299
1300     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1301     logging.info("    Writing file: '{0}'".format(file_name))
1302     with open(file_name, "w") as file_handler:
1303         for test in tbl_list:
1304             file_handler.write(test + '\n')
1305
1306
1307 def table_failed_tests(table, input_data):
1308     """Generate the table(s) with algorithm: table_failed_tests
1309     specified in the specification file.
1310
1311     :param table: Table to generate.
1312     :param input_data: Data to process.
1313     :type table: pandas.Series
1314     :type input_data: InputData
1315     """
1316
1317     logging.info("  Generating the table {0} ...".
1318                  format(table.get("title", "")))
1319
1320     # Transform the data
1321     logging.info("    Creating the data set for the {0} '{1}'.".
1322                  format(table.get("type", ""), table.get("title", "")))
1323     data = input_data.filter_data(table, continue_on_error=True)
1324
1325     # Prepare the header of the tables
1326     header = ["Test Case",
1327               "Failures [#]",
1328               "Last Failure [Time]",
1329               "Last Failure [VPP-Build-Id]",
1330               "Last Failure [CSIT-Job-Build-Id]"]
1331
1332     # Generate the data for the table according to the model in the table
1333     # specification
1334
1335     now = dt.utcnow()
1336     timeperiod = timedelta(int(table.get("window", 7)))
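    # Only builds generated within the configured time window (the "window"
    # entry of the table specification, in days, defaulting to 7) contribute
    # to the failure counts below.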
1337
1338     tbl_dict = dict()
1339     for job, builds in table["data"].items():
1340         for build in builds:
1341             build = str(build)
1342             for tst_name, tst_data in data[job][build].iteritems():
1343                 if tst_name.lower() in table.get("ignore-list", list()):
1344                     continue
1345                 if tbl_dict.get(tst_name, None) is None:
1346                     groups = re.search(REGEX_NIC, tst_data["parent"])
1347                     if not groups:
1348                         continue
1349                     nic = groups.group(0)
1350                     tbl_dict[tst_name] = {
1351                         "name": "{0}-{1}".format(nic, tst_data["name"]),
1352                         "data": OrderedDict()}
1353                 try:
1354                     generated = input_data.metadata(job, build).\
1355                         get("generated", "")
1356                     if not generated:
1357                         continue
1358                     then = dt.strptime(generated, "%Y%m%d %H:%M")
1359                     if (now - then) <= timeperiod:
1360                         tbl_dict[tst_name]["data"][build] = (
1361                             tst_data["status"],
1362                             generated,
1363                             input_data.metadata(job, build).get("version", ""),
1364                             build)
1365                 except (TypeError, KeyError) as err:
1366                     logging.warning("tst_name: {} - err: {}".
1367                                     format(tst_name, repr(err)))
1368
1369     max_fails = 0
1370     tbl_lst = list()
1371     for tst_data in tbl_dict.values():
1372         fails_nr = 0
1373         for val in tst_data["data"].values():
1374             if val[0] == "FAIL":
1375                 fails_nr += 1
1376                 fails_last_date = val[1]
1377                 fails_last_vpp = val[2]
1378                 fails_last_csit = val[3]
1379         if fails_nr:
1380             max_fails = max(max_fails, fails_nr)
1381             tbl_lst.append([tst_data["name"],
1382                             fails_nr,
1383                             fails_last_date,
1384                             fails_last_vpp,
1385                             "mrr-daily-build-{0}".format(fails_last_csit)])
1386
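    # Order the rows by the date of the last failure (newest first), then
    # group them by the number of failures, starting with the most failing
    # tests.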
1387     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1388     tbl_sorted = list()
1389     for nrf in range(max_fails, -1, -1):
1390         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1391         tbl_sorted.extend(tbl_fails)
1392     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1393
1394     logging.info("    Writing file: '{0}'".format(file_name))
1395     with open(file_name, "w") as file_handler:
1396         file_handler.write(",".join(header) + "\n")
1397         for test in tbl_sorted:
1398             file_handler.write(",".join([str(item) for item in test]) + '\n')
1399
1400     txt_file_name = "{0}.txt".format(table["output-file"])
1401     logging.info("    Writing file: '{0}'".format(txt_file_name))
1402     convert_csv_to_pretty_txt(file_name, txt_file_name)
1403
1404
1405 def table_failed_tests_html(table, input_data):
1406     """Generate the table(s) with algorithm: table_failed_tests_html
1407     specified in the specification file.
1408
1409     :param table: Table to generate.
1410     :param input_data: Data to process.
1411     :type table: pandas.Series
1412     :type input_data: InputData
1413     """
1414
1415     testbed = table.get("testbed", None)
1416     if testbed is None:
1417         logging.error("The testbed is not defined for the table '{0}'.".
1418                       format(table.get("title", "")))
1419         return
1420
1421     logging.info("  Generating the table {0} ...".
1422                  format(table.get("title", "")))
1423
1424     try:
1425         with open(table["input-file"], 'rb') as csv_file:
1426             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1427             csv_lst = [item for item in csv_content]
1428     except KeyError:
1429         logging.warning("The input file is not defined.")
1430         return
1431     except csv.Error as err:
1432         logging.warning("Cannot process the file '{0}'.\n{1}".
1433                         format(table["input-file"], err))
1434         return
1435
1436     # Table:
1437     failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
1438
1439     # Table header:
1440     tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
1441     for idx, item in enumerate(csv_lst[0]):
1442         alignment = "left" if idx == 0 else "center"
1443         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
1444         th.text = item
1445
1446     # Rows:
1447     colors = ("#e9f1fb", "#d4e4f7")
1448     for r_idx, row in enumerate(csv_lst[1:]):
1449         background = colors[r_idx % 2]
1450         tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
1451
1452         # Columns:
1453         for c_idx, item in enumerate(row):
1454             alignment = "left" if c_idx == 0 else "center"
1455             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
1456             # Name:
1457             if c_idx == 0:
1458                 url = _generate_url("../trending/", testbed, item)
1459                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
1460                 ref.text = item
1461             else:
1462                 td.text = item
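    # The generated table is embedded into the resulting .rst file via the
    # ".. raw:: html" directive so it is passed through to the output
    # unchanged.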
1463     try:
1464         with open(table["output-file"], 'w') as html_file:
1465             logging.info("    Writing file: '{0}'".format(table["output-file"]))
1466             html_file.write(".. raw:: html\n\n\t")
1467             html_file.write(ET.tostring(failed_tests))
1468             html_file.write("\n\t<p><br><br></p>\n")
1469     except KeyError:
1470         logging.warning("The output file is not defined.")
1471         return