PAL: list all selected tests in table_performance_comparison
[csit.git] resources/tools/presentation/generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
28
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30     convert_csv_to_pretty_txt, relative_change_stdev
31
32
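# Matches the NIC code embedded in suite and test names, e.g. strings of the
# form "10ge2p1x520" (example inferred from the pattern).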
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
34
35
36 def generate_tables(spec, data):
37     """Generate all tables specified in the specification file.
38
39     :param spec: Specification read from the specification file.
40     :param data: Data to process.
41     :type spec: Specification
42     :type data: InputData
43     """
44
45     logging.info("Generating the tables ...")
46     for table in spec.tables:
47         try:
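            # The algorithm name given in the specification is resolved to a
            # function defined in this module and called with the table
            # specification and the input data.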
48             eval(table["algorithm"])(table, data)
49         except NameError as err:
50             logging.error("Probably algorithm '{alg}' is not defined: {err}".
51                           format(alg=table["algorithm"], err=repr(err)))
52     logging.info("Done.")
53
54
55 def table_details(table, input_data):
56     """Generate the table(s) with algorithm: table_detailed_test_results
57     specified in the specification file.
58
59     :param table: Table to generate.
60     :param input_data: Data to process.
61     :type table: pandas.Series
62     :type input_data: InputData
63     """
64
65     logging.info("  Generating the table {0} ...".
66                  format(table.get("title", "")))
67
68     # Transform the data
69     logging.info("    Creating the data set for the {0} '{1}'.".
70                  format(table.get("type", ""), table.get("title", "")))
71     data = input_data.filter_data(table)
72
73     # Prepare the header of the tables
74     header = list()
75     for column in table["columns"]:
76         header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
77
78     # Generate the data for the table according to the model in the table
79     # specification
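    # Only one job (the first key in the table data specification) and its
    # first build are used as the data source.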
80     job = table["data"].keys()[0]
81     build = str(table["data"][job][0])
82     try:
83         suites = input_data.suites(job, build)
84     except KeyError:
85         logging.error("    No data available. The table will not be generated.")
86         return
87
88     for suite_longname, suite in suites.iteritems():
89         # Generate data
90         suite_name = suite["name"]
91         table_lst = list()
92         for test in data[job][build].keys():
93             if data[job][build][test]["parent"] in suite_name:
94                 row_lst = list()
95                 for column in table["columns"]:
96                     try:
97                         col_data = str(data[job][build][test][column["data"].
98                                        split(" ")[1]]).replace('"', '""')
99                         if column["data"].split(" ")[1] in ("conf-history",
100                                                             "show-run"):
101                             col_data = replace(col_data, " |br| ", "",
102                                                maxreplace=1)
103                             col_data = " |prein| {0} |preout| ".\
104                                 format(col_data[:-5])
105                         row_lst.append('"{0}"'.format(col_data))
106                     except KeyError:
107                         row_lst.append("No data")
108                 table_lst.append(row_lst)
109
110         # Write the data to file
111         if table_lst:
112             file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
113                                             table["output-file-ext"])
114             logging.info("      Writing file: '{}'".format(file_name))
115             with open(file_name, "w") as file_handler:
116                 file_handler.write(",".join(header) + "\n")
117                 for item in table_lst:
118                     file_handler.write(",".join(item) + "\n")
119
120     logging.info("  Done.")
121
122
123 def table_merged_details(table, input_data):
124     """Generate the table(s) with algorithm: table_merged_details
125     specified in the specification file.
126
127     :param table: Table to generate.
128     :param input_data: Data to process.
129     :type table: pandas.Series
130     :type input_data: InputData
131     """
132
133     logging.info("  Generating the table {0} ...".
134                  format(table.get("title", "")))
135
136     # Transform the data
137     logging.info("    Creating the data set for the {0} '{1}'.".
138                  format(table.get("type", ""), table.get("title", "")))
139     data = input_data.filter_data(table)
140     data = input_data.merge_data(data)
141     data.sort_index(inplace=True)
142
143     logging.info("    Creating the suites data set for the {0} '{1}'.".
144                  format(table.get("type", ""), table.get("title", "")))
145     suites = input_data.filter_data(table, data_set="suites")
146     suites = input_data.merge_data(suites)
147
148     # Prepare the header of the tables
149     header = list()
150     for column in table["columns"]:
151         header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
152
153     for _, suite in suites.iteritems():
154         # Generate data
155         suite_name = suite["name"]
156         table_lst = list()
157         for test in data.keys():
158             if data[test]["parent"] in suite_name:
159                 row_lst = list()
160                 for column in table["columns"]:
161                     try:
162                         col_data = str(data[test][column["data"].
163                                        split(" ")[1]]).replace('"', '""')
164                         col_data = replace(col_data, "No Data",
165                                            "Not Captured     ")
166                         if column["data"].split(" ")[1] in ("conf-history",
167                                                             "show-run"):
168                             col_data = replace(col_data, " |br| ", "",
169                                                maxreplace=1)
170                             col_data = " |prein| {0} |preout| ".\
171                                 format(col_data[:-5])
172                         row_lst.append('"{0}"'.format(col_data))
173                     except KeyError:
174                         row_lst.append('"Not captured"')
175                 table_lst.append(row_lst)
176
177         # Write the data to file
178         if table_lst:
179             file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
180                                             table["output-file-ext"])
181             logging.info("      Writing file: '{}'".format(file_name))
182             with open(file_name, "w") as file_handler:
183                 file_handler.write(",".join(header) + "\n")
184                 for item in table_lst:
185                     file_handler.write(",".join(item) + "\n")
186
187     logging.info("  Done.")
188
189
190 def table_performance_comparison(table, input_data):
191     """Generate the table(s) with algorithm: table_performance_comparison
192     specified in the specification file.
193
194     :param table: Table to generate.
195     :param input_data: Data to process.
196     :type table: pandas.Series
197     :type input_data: InputData
198     """
199
200     logging.info("  Generating the table {0} ...".
201                  format(table.get("title", "")))
202
203     # Transform the data
204     logging.info("    Creating the data set for the {0} '{1}'.".
205                  format(table.get("type", ""), table.get("title", "")))
206     data = input_data.filter_data(table, continue_on_error=True)
207
208     # Prepare the header of the tables
209     try:
210         header = ["Test case", ]
211
212         if table["include-tests"] == "MRR":
213             hdr_param = "Receive Rate"
214         else:
215             hdr_param = "Throughput"
216
217         history = table.get("history", None)
218         if history:
219             for item in history:
220                 header.extend(
221                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
222                      "{0} Stdev [Mpps]".format(item["title"])])
223         header.extend(
224             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
225              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
226              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
227              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
228              "Delta [%]"])
229         header_str = ",".join(header) + "\n"
230     except (AttributeError, KeyError) as err:
231         logging.error("The model is invalid, missing parameter: {0}".
232                       format(err))
233         return
234
235     # Prepare data for the table:
236     tbl_dict = dict()
237     for job, builds in table["reference"]["data"].items():
238         for build in builds:
239             for tst_name, tst_data in data[job][str(build)].iteritems():
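                # Normalise the test name: strip the rate-search suffix
                # (-ndrpdr, -pdrdisc, ...) and collapse thread/core tags
                # (e.g. 2t1c -> 1c) so runs can be matched across builds.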
240                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
241                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
242                     replace("-ndrdisc", "").replace("-pdr", "").\
243                     replace("-ndr", "").\
244                     replace("1t1c", "1c").replace("2t1c", "1c").\
245                     replace("2t2c", "2c").replace("4t2c", "2c").\
246                     replace("4t4c", "4c").replace("8t4c", "4c")
247                 if "across topologies" in table["title"].lower():
248                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
249                 if tbl_dict.get(tst_name_mod, None) is None:
250                     groups = re.search(REGEX_NIC, tst_data["parent"])
251                     nic = groups.group(0) if groups else ""
252                     name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
253                                                           split("-")[:-1]))
254                     if "across testbeds" in table["title"].lower() or \
255                             "across topologies" in table["title"].lower():
256                         name = name.\
257                             replace("1t1c", "1c").replace("2t1c", "1c").\
258                             replace("2t2c", "2c").replace("4t2c", "2c").\
259                             replace("4t4c", "4c").replace("8t4c", "4c")
260                     tbl_dict[tst_name_mod] = {"name": name,
261                                               "ref-data": list(),
262                                               "cmp-data": list()}
263                 try:
264                     # TODO: Re-work when NDRPDRDISC tests are not used
265                     if table["include-tests"] == "MRR":
266                         tbl_dict[tst_name_mod]["ref-data"]. \
267                             append(tst_data["result"]["receive-rate"].avg)
268                     elif table["include-tests"] == "PDR":
269                         if tst_data["type"] == "PDR":
270                             tbl_dict[tst_name_mod]["ref-data"]. \
271                                 append(tst_data["throughput"]["value"])
272                         elif tst_data["type"] == "NDRPDR":
273                             tbl_dict[tst_name_mod]["ref-data"].append(
274                                 tst_data["throughput"]["PDR"]["LOWER"])
275                     elif table["include-tests"] == "NDR":
276                         if tst_data["type"] == "NDR":
277                             tbl_dict[tst_name_mod]["ref-data"]. \
278                                 append(tst_data["throughput"]["value"])
279                         elif tst_data["type"] == "NDRPDR":
280                             tbl_dict[tst_name_mod]["ref-data"].append(
281                                 tst_data["throughput"]["NDR"]["LOWER"])
282                     else:
283                         continue
284                 except TypeError:
285                     pass  # No data in output.xml for this test
286
287     for job, builds in table["compare"]["data"].items():
288         for build in builds:
289             for tst_name, tst_data in data[job][str(build)].iteritems():
290                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
291                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
292                     replace("-ndrdisc", "").replace("-pdr", ""). \
293                     replace("-ndr", "").\
294                     replace("1t1c", "1c").replace("2t1c", "1c").\
295                     replace("2t2c", "2c").replace("4t2c", "2c").\
296                     replace("4t4c", "4c").replace("8t4c", "4c")
297                 if "across topologies" in table["title"].lower():
298                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
299                 if tbl_dict.get(tst_name_mod, None) is None:
300                     groups = re.search(REGEX_NIC, tst_data["parent"])
301                     nic = groups.group(0) if groups else ""
302                     name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
303                                                           split("-")[:-1]))
304                     if "across testbeds" in table["title"].lower() or \
305                             "across topologies" in table["title"].lower():
306                         name = name.\
307                             replace("1t1c", "1c").replace("2t1c", "1c").\
308                             replace("2t2c", "2c").replace("4t2c", "2c").\
309                             replace("4t4c", "4c").replace("8t4c", "4c")
310                     tbl_dict[tst_name_mod] = {"name": name,
311                                               "ref-data": list(),
312                                               "cmp-data": list()}
313                 try:
314                     # TODO: Re-work when NDRPDRDISC tests are not used
315                     if table["include-tests"] == "MRR":
316                         tbl_dict[tst_name_mod]["cmp-data"]. \
317                             append(tst_data["result"]["receive-rate"].avg)
318                     elif table["include-tests"] == "PDR":
319                         if tst_data["type"] == "PDR":
320                             tbl_dict[tst_name_mod]["cmp-data"]. \
321                                 append(tst_data["throughput"]["value"])
322                         elif tst_data["type"] == "NDRPDR":
323                             tbl_dict[tst_name_mod]["cmp-data"].append(
324                                 tst_data["throughput"]["PDR"]["LOWER"])
325                     elif table["include-tests"] == "NDR":
326                         if tst_data["type"] == "NDR":
327                             tbl_dict[tst_name_mod]["cmp-data"]. \
328                                 append(tst_data["throughput"]["value"])
329                         elif tst_data["type"] == "NDRPDR":
330                             tbl_dict[tst_name_mod]["cmp-data"].append(
331                                 tst_data["throughput"]["NDR"]["LOWER"])
332                     else:
333                         continue
334                 except (KeyError, TypeError):
335                     pass
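    # Optionally collect results of older runs listed under "history" so the
    # table can show a longer comparison horizon.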
336     if history:
337         for item in history:
338             for job, builds in item["data"].items():
339                 for build in builds:
340                     for tst_name, tst_data in data[job][str(build)].iteritems():
341                         tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
342                             replace("-ndrpdr", "").replace("-pdrdisc", ""). \
343                             replace("-ndrdisc", "").replace("-pdr", ""). \
344                             replace("-ndr", "").\
345                             replace("1t1c", "1c").replace("2t1c", "1c").\
346                             replace("2t2c", "2c").replace("4t2c", "2c").\
347                             replace("4t4c", "4c").replace("8t4c", "4c")
348                         if "across topologies" in table["title"].lower():
349                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
350                         if tbl_dict.get(tst_name_mod, None) is None:
351                             continue
352                         if tbl_dict[tst_name_mod].get("history", None) is None:
353                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
354                         if tbl_dict[tst_name_mod]["history"].get(item["title"],
355                                                              None) is None:
356                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
357                                 list()
358                         try:
359                             # TODO: Re-work when NDRPDRDISC tests are not used
360                             if table["include-tests"] == "MRR":
361                                 tbl_dict[tst_name_mod]["history"][item["title"
362                                 ]].append(tst_data["result"]["receive-rate"].
363                                           avg)
364                             elif table["include-tests"] == "PDR":
365                                 if tst_data["type"] == "PDR":
366                                     tbl_dict[tst_name_mod]["history"][
367                                         item["title"]].\
368                                         append(tst_data["throughput"]["value"])
369                                 elif tst_data["type"] == "NDRPDR":
370                                     tbl_dict[tst_name_mod]["history"][item[
371                                         "title"]].append(tst_data["throughput"][
372                                         "PDR"]["LOWER"])
373                             elif table["include-tests"] == "NDR":
374                                 if tst_data["type"] == "NDR":
375                                     tbl_dict[tst_name_mod]["history"][
376                                         item["title"]].\
377                                         append(tst_data["throughput"]["value"])
378                                 elif tst_data["type"] == "NDRPDR":
379                                     tbl_dict[tst_name_mod]["history"][item[
380                                         "title"]].append(tst_data["throughput"][
381                                         "NDR"]["LOWER"])
382                             else:
383                                 continue
384                         except (TypeError, KeyError):
385                             pass
386
387     tbl_lst = list()
388     for tst_name in tbl_dict.keys():
389         item = [tbl_dict[tst_name]["name"], ]
390         if history:
391             if tbl_dict[tst_name].get("history", None) is not None:
392                 for hist_data in tbl_dict[tst_name]["history"].values():
393                     if hist_data:
394                         item.append(round(mean(hist_data) / 1000000, 2))
395                         item.append(round(stdev(hist_data) / 1000000, 2))
396                     else:
397                         item.extend([None, None])
398             else:
399                 item.extend([None, None])
400         data_t = tbl_dict[tst_name]["ref-data"]
401         if data_t:
402             item.append(round(mean(data_t) / 1000000, 2))
403             item.append(round(stdev(data_t) / 1000000, 2))
404         else:
405             item.extend([None, None])
406         data_t = tbl_dict[tst_name]["cmp-data"]
407         if data_t:
408             item.append(round(mean(data_t) / 1000000, 2))
409             item.append(round(stdev(data_t) / 1000000, 2))
410         else:
411             item.extend([None, None])
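        # Delta [%]: relative change of the compare mean (item[-2]) against
        # the reference mean (item[-4]).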
412         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
413             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
414         else:
415             item.append(None)
416         if len(item) == len(header):
417             tbl_lst.append(item)
418
419     # Sort the table according to the relative change
420     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
421
422     # Generate csv tables:
423     csv_file = "{0}.csv".format(table["output-file"])
424     with open(csv_file, "w") as file_handler:
425         file_handler.write(header_str)
426         for test in tbl_lst:
427             file_handler.write(",".join([str(item) for item in test]) + "\n")
428
429     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
430
431
432 def table_performance_comparison_nic(table, input_data):
433     """Generate the table(s) with algorithm: table_performance_comparison
434     specified in the specification file.
435
436     :param table: Table to generate.
437     :param input_data: Data to process.
438     :type table: pandas.Series
439     :type input_data: InputData
440     """
441
442     logging.info("  Generating the table {0} ...".
443                  format(table.get("title", "")))
444
445     # Transform the data
446     logging.info("    Creating the data set for the {0} '{1}'.".
447                  format(table.get("type", ""), table.get("title", "")))
448     data = input_data.filter_data(table, continue_on_error=True)
449
450     # Prepare the header of the tables
451     try:
452         header = ["Test case", ]
453
454         if table["include-tests"] == "MRR":
455             hdr_param = "Receive Rate"
456         else:
457             hdr_param = "Throughput"
458
459         history = table.get("history", None)
460         if history:
461             for item in history:
462                 header.extend(
463                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
464                      "{0} Stdev [Mpps]".format(item["title"])])
465         header.extend(
466             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
467              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
468              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
469              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
470              "Delta [%]"])
471         header_str = ",".join(header) + "\n"
472     except (AttributeError, KeyError) as err:
473         logging.error("The model is invalid, missing parameter: {0}".
474                       format(err))
475         return
476
477     # Prepare data for the table:
478     tbl_dict = dict()
479     for job, builds in table["reference"]["data"].items():
480         for build in builds:
481             for tst_name, tst_data in data[job][str(build)].iteritems():
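                # Only tests run on the reference NIC (matched via test tags)
                # are taken into account.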
482                 if table["reference"]["nic"] not in tst_data["tags"]:
483                     continue
484                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
485                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
486                     replace("-ndrdisc", "").replace("-pdr", "").\
487                     replace("-ndr", "").\
488                     replace("1t1c", "1c").replace("2t1c", "1c").\
489                     replace("2t2c", "2c").replace("4t2c", "2c").\
490                     replace("4t4c", "4c").replace("8t4c", "4c")
491                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
492                 if "across topologies" in table["title"].lower():
493                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
494                 if tbl_dict.get(tst_name_mod, None) is None:
495                     name = "{0}".format("-".join(tst_data["name"].
496                                                  split("-")[:-1]))
497                     if "across testbeds" in table["title"].lower() or \
498                             "across topologies" in table["title"].lower():
499                         name = name.\
500                             replace("1t1c", "1c").replace("2t1c", "1c").\
501                             replace("2t2c", "2c").replace("4t2c", "2c").\
502                             replace("4t4c", "4c").replace("8t4c", "4c")
503                     tbl_dict[tst_name_mod] = {"name": name,
504                                               "ref-data": list(),
505                                               "cmp-data": list()}
506                 try:
507                     # TODO: Re-work when NDRPDRDISC tests are not used
508                     if table["include-tests"] == "MRR":
509                         tbl_dict[tst_name_mod]["ref-data"]. \
510                             append(tst_data["result"]["receive-rate"].avg)
511                     elif table["include-tests"] == "PDR":
512                         if tst_data["type"] == "PDR":
513                             tbl_dict[tst_name_mod]["ref-data"]. \
514                                 append(tst_data["throughput"]["value"])
515                         elif tst_data["type"] == "NDRPDR":
516                             tbl_dict[tst_name_mod]["ref-data"].append(
517                                 tst_data["throughput"]["PDR"]["LOWER"])
518                     elif table["include-tests"] == "NDR":
519                         if tst_data["type"] == "NDR":
520                             tbl_dict[tst_name_mod]["ref-data"]. \
521                                 append(tst_data["throughput"]["value"])
522                         elif tst_data["type"] == "NDRPDR":
523                             tbl_dict[tst_name_mod]["ref-data"].append(
524                                 tst_data["throughput"]["NDR"]["LOWER"])
525                     else:
526                         continue
527                 except TypeError:
528                     pass  # No data in output.xml for this test
529
530     for job, builds in table["compare"]["data"].items():
531         for build in builds:
532             for tst_name, tst_data in data[job][str(build)].iteritems():
533                 if table["compare"]["nic"] not in tst_data["tags"]:
534                     continue
535                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
536                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
537                     replace("-ndrdisc", "").replace("-pdr", ""). \
538                     replace("-ndr", "").\
539                     replace("1t1c", "1c").replace("2t1c", "1c").\
540                     replace("2t2c", "2c").replace("4t2c", "2c").\
541                     replace("4t4c", "4c").replace("8t4c", "4c")
542                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
543                 if "across topologies" in table["title"].lower():
544                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
545                 if tbl_dict.get(tst_name_mod, None) is None:
546                     name = "{0}".format("-".join(tst_data["name"].
547                                                  split("-")[:-1]))
548                     if "across testbeds" in table["title"].lower() or \
549                             "across topologies" in table["title"].lower():
550                         name = name.\
551                             replace("1t1c", "1c").replace("2t1c", "1c").\
552                             replace("2t2c", "2c").replace("4t2c", "2c").\
553                             replace("4t4c", "4c").replace("8t4c", "4c")
554                     tbl_dict[tst_name_mod] = {"name": name,
555                                               "ref-data": list(),
556                                               "cmp-data": list()}
557                 try:
558                     # TODO: Re-work when NDRPDRDISC tests are not used
559                     if table["include-tests"] == "MRR":
560                         tbl_dict[tst_name_mod]["cmp-data"]. \
561                             append(tst_data["result"]["receive-rate"].avg)
562                     elif table["include-tests"] == "PDR":
563                         if tst_data["type"] == "PDR":
564                             tbl_dict[tst_name_mod]["cmp-data"]. \
565                                 append(tst_data["throughput"]["value"])
566                         elif tst_data["type"] == "NDRPDR":
567                             tbl_dict[tst_name_mod]["cmp-data"].append(
568                                 tst_data["throughput"]["PDR"]["LOWER"])
569                     elif table["include-tests"] == "NDR":
570                         if tst_data["type"] == "NDR":
571                             tbl_dict[tst_name_mod]["cmp-data"]. \
572                                 append(tst_data["throughput"]["value"])
573                         elif tst_data["type"] == "NDRPDR":
574                             tbl_dict[tst_name_mod]["cmp-data"].append(
575                                 tst_data["throughput"]["NDR"]["LOWER"])
576                     else:
577                         continue
578                 except (KeyError, TypeError):
579                     pass
580
581     if history:
582         for item in history:
583             for job, builds in item["data"].items():
584                 for build in builds:
585                     for tst_name, tst_data in data[job][str(build)].iteritems():
586                         if item["nic"] not in tst_data["tags"]:
587                             continue
588                         tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
589                             replace("-ndrpdr", "").replace("-pdrdisc", ""). \
590                             replace("-ndrdisc", "").replace("-pdr", ""). \
591                             replace("-ndr", "").\
592                             replace("1t1c", "1c").replace("2t1c", "1c").\
593                             replace("2t2c", "2c").replace("4t2c", "2c").\
594                             replace("4t4c", "4c").replace("8t4c", "4c")
595                         tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
596                         if "across topologies" in table["title"].lower():
597                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
598                         if tbl_dict.get(tst_name_mod, None) is None:
599                             continue
600                         if tbl_dict[tst_name_mod].get("history", None) is None:
601                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
602                         if tbl_dict[tst_name_mod]["history"].get(item["title"],
603                                                              None) is None:
604                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
605                                 list()
606                         try:
607                             # TODO: Re-work when NDRPDRDISC tests are not used
608                             if table["include-tests"] == "MRR":
609                                 tbl_dict[tst_name_mod]["history"][item["title"
610                                 ]].append(tst_data["result"]["receive-rate"].
611                                           avg)
612                             elif table["include-tests"] == "PDR":
613                                 if tst_data["type"] == "PDR":
614                                     tbl_dict[tst_name_mod]["history"][
615                                         item["title"]].\
616                                         append(tst_data["throughput"]["value"])
617                                 elif tst_data["type"] == "NDRPDR":
618                                     tbl_dict[tst_name_mod]["history"][item[
619                                         "title"]].append(tst_data["throughput"][
620                                         "PDR"]["LOWER"])
621                             elif table["include-tests"] == "NDR":
622                                 if tst_data["type"] == "NDR":
623                                     tbl_dict[tst_name_mod]["history"][
624                                         item["title"]].\
625                                         append(tst_data["throughput"]["value"])
626                                 elif tst_data["type"] == "NDRPDR":
627                                     tbl_dict[tst_name_mod]["history"][item[
628                                         "title"]].append(tst_data["throughput"][
629                                         "NDR"]["LOWER"])
630                             else:
631                                 continue
632                         except (TypeError, KeyError):
633                             pass
634
635     tbl_lst = list()
636     for tst_name in tbl_dict.keys():
637         item = [tbl_dict[tst_name]["name"], ]
638         if history:
639             if tbl_dict[tst_name].get("history", None) is not None:
640                 for hist_data in tbl_dict[tst_name]["history"].values():
641                     if hist_data:
642                         item.append(round(mean(hist_data) / 1000000, 2))
643                         item.append(round(stdev(hist_data) / 1000000, 2))
644                     else:
645                         item.extend([None, None])
646             else:
647                 item.extend([None, None])
648         data_t = tbl_dict[tst_name]["ref-data"]
649         if data_t:
650             item.append(round(mean(data_t) / 1000000, 2))
651             item.append(round(stdev(data_t) / 1000000, 2))
652         else:
653             item.extend([None, None])
654         data_t = tbl_dict[tst_name]["cmp-data"]
655         if data_t:
656             item.append(round(mean(data_t) / 1000000, 2))
657             item.append(round(stdev(data_t) / 1000000, 2))
658         else:
659             item.extend([None, None])
660         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
661             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
662         else:
663             item.append(None)
664         if len(item) == len(header):
665             tbl_lst.append(item)
666
667     # Sort the table according to the relative change
668     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
669
670     # Generate csv tables:
671     csv_file = "{0}.csv".format(table["output-file"])
672     with open(csv_file, "w") as file_handler:
673         file_handler.write(header_str)
674         for test in tbl_lst:
675             file_handler.write(",".join([str(item) for item in test]) + "\n")
676
677     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
678
679
680 def table_nics_comparison(table, input_data):
681     """Generate the table(s) with algorithm: table_nics_comparison
682     specified in the specification file.
683
684     :param table: Table to generate.
685     :param input_data: Data to process.
686     :type table: pandas.Series
687     :type input_data: InputData
688     """
689
690     logging.info("  Generating the table {0} ...".
691                  format(table.get("title", "")))
692
693     # Transform the data
694     logging.info("    Creating the data set for the {0} '{1}'.".
695                  format(table.get("type", ""), table.get("title", "")))
696     data = input_data.filter_data(table, continue_on_error=True)
697
698     # Prepare the header of the tables
699     try:
700         header = ["Test case", ]
701
702         if table["include-tests"] == "MRR":
703             hdr_param = "Receive Rate"
704         else:
705             hdr_param = "Throughput"
706
707         header.extend(
708             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
709              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
710              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
711              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
712              "Delta [%]"])
713         header_str = ",".join(header) + "\n"
714     except (AttributeError, KeyError) as err:
715         logging.error("The model is invalid, missing parameter: {0}".
716                       format(err))
717         return
718
719     # Prepare data for the table:
720     tbl_dict = dict()
721     for job, builds in table["data"].items():
722         for build in builds:
723             for tst_name, tst_data in data[job][str(build)].iteritems():
724                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
725                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
726                     replace("-ndrdisc", "").replace("-pdr", "").\
727                     replace("-ndr", "").\
728                     replace("1t1c", "1c").replace("2t1c", "1c").\
729                     replace("2t2c", "2c").replace("4t2c", "2c").\
730                     replace("4t4c", "4c").replace("8t4c", "4c")
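                # Strip the NIC code from the test name so results measured on
                # the reference and compare NICs fall under the same key.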
731                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
732                 if tbl_dict.get(tst_name_mod, None) is None:
733                     name = "-".join(tst_data["name"].split("-")[:-1])
734                     tbl_dict[tst_name_mod] = {"name": name,
735                                               "ref-data": list(),
736                                               "cmp-data": list()}
737                 try:
738                     if table["include-tests"] == "MRR":
739                         result = tst_data["result"]["receive-rate"].avg
740                     elif table["include-tests"] == "PDR":
741                         result = tst_data["throughput"]["PDR"]["LOWER"]
742                     elif table["include-tests"] == "NDR":
743                         result = tst_data["throughput"]["NDR"]["LOWER"]
744                     else:
745                         result = None
746
747                     if result:
748                         if table["reference"]["nic"] in tst_data["tags"]:
749                             tbl_dict[tst_name_mod]["ref-data"].append(result)
750                         elif table["compare"]["nic"] in tst_data["tags"]:
751                             tbl_dict[tst_name_mod]["cmp-data"].append(result)
752                 except (TypeError, KeyError) as err:
753                     logging.debug("No data for {0}".format(tst_name))
754                     logging.debug(repr(err))
755                     # No data in output.xml for this test
756
757     tbl_lst = list()
758     for tst_name in tbl_dict.keys():
759         item = [tbl_dict[tst_name]["name"], ]
760         data_t = tbl_dict[tst_name]["ref-data"]
761         if data_t:
762             item.append(round(mean(data_t) / 1000000, 2))
763             item.append(round(stdev(data_t) / 1000000, 2))
764         else:
765             item.extend([None, None])
766         data_t = tbl_dict[tst_name]["cmp-data"]
767         if data_t:
768             item.append(round(mean(data_t) / 1000000, 2))
769             item.append(round(stdev(data_t) / 1000000, 2))
770         else:
771             item.extend([None, None])
772         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
773             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
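        # When either NIC has no data the delta is not appended, so the row
        # fails the length check below and is dropped.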
774         if len(item) == len(header):
775             tbl_lst.append(item)
776
777     # Sort the table according to the relative change
778     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
779
780     # Generate csv tables:
781     csv_file = "{0}.csv".format(table["output-file"])
782     with open(csv_file, "w") as file_handler:
783         file_handler.write(header_str)
784         for test in tbl_lst:
785             file_handler.write(",".join([str(item) for item in test]) + "\n")
786
787     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
788
789
790 def table_soak_vs_ndr(table, input_data):
791     """Generate the table(s) with algorithm: table_soak_vs_ndr
792     specified in the specification file.
793
794     :param table: Table to generate.
795     :param input_data: Data to process.
796     :type table: pandas.Series
797     :type input_data: InputData
798     """
799
800     logging.info("  Generating the table {0} ...".
801                  format(table.get("title", "")))
802
803     # Transform the data
804     logging.info("    Creating the data set for the {0} '{1}'.".
805                  format(table.get("type", ""), table.get("title", "")))
806     data = input_data.filter_data(table, continue_on_error=True)
807
808     # Prepare the header of the table
809     try:
810         header = [
811             "Test case",
812             "{0} Throughput [Mpps]".format(table["reference"]["title"]),
813             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
814             "{0} Throughput [Mpps]".format(table["compare"]["title"]),
815             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
816             "Delta [%]", "Stdev of delta [%]"]
817         header_str = ",".join(header) + "\n"
818     except (AttributeError, KeyError) as err:
819         logging.error("The model is invalid, missing parameter: {0}".
820                       format(err))
821         return
822
823     # Create a list of available SOAK test results:
824     tbl_dict = dict()
825     for job, builds in table["compare"]["data"].items():
826         for build in builds:
827             for tst_name, tst_data in data[job][str(build)].iteritems():
828                 if tst_data["type"] == "SOAK":
829                     tst_name_mod = tst_name.replace("-soak", "")
830                     if tbl_dict.get(tst_name_mod, None) is None:
831                         groups = re.search(REGEX_NIC, tst_data["parent"])
832                         nic = groups.group(0) if groups else ""
833                         name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
834                                                               split("-")[:-1]))
835                         tbl_dict[tst_name_mod] = {
836                             "name": name,
837                             "ref-data": list(),
838                             "cmp-data": list()
839                         }
840                     try:
841                         tbl_dict[tst_name_mod]["cmp-data"].append(
842                             tst_data["throughput"]["LOWER"])
843                     except (KeyError, TypeError):
844                         pass
845     tests_lst = tbl_dict.keys()
846
847     # Add corresponding NDR test results:
848     for job, builds in table["reference"]["data"].items():
849         for build in builds:
850             for tst_name, tst_data in data[job][str(build)].iteritems():
851                 tst_name_mod = tst_name.replace("-ndrpdr", "").\
852                     replace("-mrr", "")
853                 if tst_name_mod in tests_lst:
854                     try:
855                         if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
856                             if table["include-tests"] == "MRR":
857                                 result = tst_data["result"]["receive-rate"].avg
858                             elif table["include-tests"] == "PDR":
859                                 result = tst_data["throughput"]["PDR"]["LOWER"]
860                             elif table["include-tests"] == "NDR":
861                                 result = tst_data["throughput"]["NDR"]["LOWER"]
862                             else:
863                                 result = None
864                             if result is not None:
865                                 tbl_dict[tst_name_mod]["ref-data"].append(
866                                     result)
867                     except (KeyError, TypeError):
868                         continue
869
870     tbl_lst = list()
871     for tst_name in tbl_dict.keys():
872         item = [tbl_dict[tst_name]["name"], ]
873         data_r = tbl_dict[tst_name]["ref-data"]
874         if data_r:
875             data_r_mean = mean(data_r)
876             item.append(round(data_r_mean / 1000000, 2))
877             data_r_stdev = stdev(data_r)
878             item.append(round(data_r_stdev / 1000000, 2))
879         else:
880             data_r_mean = None
881             data_r_stdev = None
882             item.extend([None, None])
883         data_c = tbl_dict[tst_name]["cmp-data"]
884         if data_c:
885             data_c_mean = mean(data_c)
886             item.append(round(data_c_mean / 1000000, 2))
887             data_c_stdev = stdev(data_c)
888             item.append(round(data_c_stdev / 1000000, 2))
889         else:
890             data_c_mean = None
891             data_c_stdev = None
892             item.extend([None, None])
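        # Relative change of soak vs NDR throughput together with its standard
        # deviation propagated from both sample stdevs.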
893         if data_r_mean and data_c_mean:
894             delta, d_stdev = relative_change_stdev(
895                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
896             item.append(round(delta, 2))
897             item.append(round(d_stdev, 2))
898             tbl_lst.append(item)
899
900     # Sort the table according to the relative change
901     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
902
903     # Generate csv tables:
904     csv_file = "{0}.csv".format(table["output-file"])
905     with open(csv_file, "w") as file_handler:
906         file_handler.write(header_str)
907         for test in tbl_lst:
908             file_handler.write(",".join([str(item) for item in test]) + "\n")
909
910     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
911
912
913 def table_performance_trending_dashboard(table, input_data):
914     """Generate the table(s) with algorithm:
915     table_performance_trending_dashboard
916     specified in the specification file.
917
918     :param table: Table to generate.
919     :param input_data: Data to process.
920     :type table: pandas.Series
921     :type input_data: InputData
922     """
923
924     logging.info("  Generating the table {0} ...".
925                  format(table.get("title", "")))
926
927     # Transform the data
928     logging.info("    Creating the data set for the {0} '{1}'.".
929                  format(table.get("type", ""), table.get("title", "")))
930     data = input_data.filter_data(table, continue_on_error=True)
931
932     # Prepare the header of the tables
933     header = ["Test Case",
934               "Trend [Mpps]",
935               "Short-Term Change [%]",
936               "Long-Term Change [%]",
937               "Regressions [#]",
938               "Progressions [#]"
939               ]
940     header_str = ",".join(header) + "\n"
941
942     # Prepare data for the table:
943     tbl_dict = dict()
944     for job, builds in table["data"].items():
945         for build in builds:
946             for tst_name, tst_data in data[job][str(build)].iteritems():
947                 if tst_name.lower() in table.get("ignore-list", list()):
948                     continue
949                 if tbl_dict.get(tst_name, None) is None:
950                     groups = re.search(REGEX_NIC, tst_data["parent"])
951                     if not groups:
952                         continue
953                     nic = groups.group(0)
954                     tbl_dict[tst_name] = {
955                         "name": "{0}-{1}".format(nic, tst_data["name"]),
956                         "data": OrderedDict()}
957                 try:
958                     tbl_dict[tst_name]["data"][str(build)] = \
959                         tst_data["result"]["receive-rate"]
960                 except (TypeError, KeyError):
961                     pass  # No data in output.xml for this test
962
963     tbl_lst = list()
964     for tst_name in tbl_dict.keys():
965         data_t = tbl_dict[tst_name]["data"]
966         if len(data_t) < 2:
967             continue
968
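        # Classify anomalies in the trend (values such as "regression" and
        # "progression") and obtain the list of trend averages.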
969         classification_lst, avgs = classify_anomalies(data_t)
970
971         win_size = min(len(data_t), table["window"])
972         long_win_size = min(len(data_t), table["long-trend-window"])
973
974         try:
975             max_long_avg = max(
976                 [x for x in avgs[-long_win_size:-win_size]
977                  if not isnan(x)])
978         except ValueError:
979             max_long_avg = nan
980         last_avg = avgs[-1]
981         avg_week_ago = avgs[max(-win_size, -len(avgs))]
982
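        # Short-term change: last trend average vs. the average one window
        # ago; long-term change: last trend average vs. the long-window
        # maximum.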
983         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
984             rel_change_last = nan
985         else:
986             rel_change_last = round(
987                 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
988
989         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
990             rel_change_long = nan
991         else:
992             rel_change_long = round(
993                 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
994
995         if classification_lst:
996             if isnan(rel_change_last) and isnan(rel_change_long):
997                 continue
998             if (isnan(last_avg) or
999                 isnan(rel_change_last) or
1000                 isnan(rel_change_long)):
1001                 continue
1002             tbl_lst.append(
1003                 [tbl_dict[tst_name]["name"],
1004                  round(last_avg / 1000000, 2),
1005                  rel_change_last,
1006                  rel_change_long,
1007                  classification_lst[-win_size:].count("regression"),
1008                  classification_lst[-win_size:].count("progression")])
1009
1010     tbl_lst.sort(key=lambda rel: rel[0])
1011
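    # Order the dashboard: most regressions first, then most progressions,
    # then by short-term change.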
1012     tbl_sorted = list()
1013     for nrr in range(table["window"], -1, -1):
1014         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1015         for nrp in range(table["window"], -1, -1):
1016             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1017             tbl_out.sort(key=lambda rel: rel[2])
1018             tbl_sorted.extend(tbl_out)
1019
1020     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1021
1022     logging.info("    Writing file: '{0}'".format(file_name))
1023     with open(file_name, "w") as file_handler:
1024         file_handler.write(header_str)
1025         for test in tbl_sorted:
1026             file_handler.write(",".join([str(item) for item in test]) + '\n')
1027
1028     txt_file_name = "{0}.txt".format(table["output-file"])
1029     logging.info("    Writing file: '{0}'".format(txt_file_name))
1030     convert_csv_to_pretty_txt(file_name, txt_file_name)
1031
1032
1033 def _generate_url(base, testbed, test_name):
1034     """Generate URL to a trending plot from the name of the test case.
1035
1036     :param base: The base part of URL common to all test cases.
1037     :param testbed: The testbed used for testing.
1038     :param test_name: The name of the test case.
1039     :type base: str
1040     :type testbed: str
1041     :type test_name: str
1042     :returns: The URL to the plot with the trending data for the given test
1043         case.
1044     :rtype: str
1045     """
1046
1047     url = base
1048     file_name = ""
1049     anchor = ".html#"
1050     feature = ""
1051
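    # The trending page file name, the anchor (NIC, frame size, cores) and an
    # optional feature suffix are derived from substrings of the test name.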
1052     if "lbdpdk" in test_name or "lbvpp" in test_name:
1053         file_name = "link_bonding"
1054
1055     elif "114b" in test_name and "vhost" in test_name:
1056         file_name = "vts"
1057
1058     elif "testpmd" in test_name or "l3fwd" in test_name:
1059         file_name = "dpdk"
1060
1061     elif "memif" in test_name:
1062         file_name = "container_memif"
1063         feature = "-base"
1064
1065     elif "srv6" in test_name:
1066         file_name = "srv6"
1067
1068     elif "vhost" in test_name:
1069         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
1070             file_name = "vm_vhost_l2"
1071             if "114b" in test_name:
1072                 feature = ""
1073             elif "l2xcbase" in test_name and "x520" in test_name:
1074                 feature = "-base-l2xc"
1075             elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
1076                 feature = "-base-l2bd"
1077             else:
1078                 feature = "-base"
1079         elif "ip4base" in test_name:
1080             file_name = "vm_vhost_ip4"
1081             feature = "-base"
1082
1083     elif "ipsecbasetnlsw" in test_name:
1084         file_name = "ipsecsw"
1085         feature = "-base-scale"
1086
1087     elif "ipsec" in test_name:
1088         file_name = "ipsec"
1089         feature = "-base-scale"
1090         if "hw-" in test_name:
1091             file_name = "ipsechw"
1092         elif "sw-" in test_name:
1093             file_name = "ipsecsw"
1094         if "-int-" in test_name:
1095             feature = "-base-scale-int"
1096         elif "tnl" in test_name:
1097             feature = "-base-scale-tnl"
1098
1099     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
1100         file_name = "ip4_tunnels"
1101         feature = "-base"
1102
1103     elif "ip4base" in test_name or "ip4scale" in test_name:
1104         file_name = "ip4"
1105         if "xl710" in test_name:
1106             feature = "-base-scale-features"
1107         elif "iacl" in test_name:
1108             feature = "-features-iacl"
1109         elif "oacl" in test_name:
1110             feature = "-features-oacl"
1111         elif "snat" in test_name or "cop" in test_name:
1112             feature = "-features"
1113         else:
1114             feature = "-base-scale"
1115
1116     elif "ip6base" in test_name or "ip6scale" in test_name:
1117         file_name = "ip6"
1118         feature = "-base-scale"
1119
1120     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
1121             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
1122             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
1123         file_name = "l2"
1124         if "macip" in test_name:
1125             feature = "-features-macip"
1126         elif "iacl" in test_name:
1127             feature = "-features-iacl"
1128         elif "oacl" in test_name:
1129             feature = "-features-oacl"
1130         else:
1131             feature = "-base-scale"
1132
1133     if "x520" in test_name:
1134         nic = "x520-"
1135     elif "x710" in test_name:
1136         nic = "x710-"
1137     elif "xl710" in test_name:
1138         nic = "xl710-"
1139     elif "xxv710" in test_name:
1140         nic = "xxv710-"
1141     elif "vic1227" in test_name:
1142         nic = "vic1227-"
1143     elif "vic1385" in test_name:
1144         nic = "vic1385-"
1145     elif "x553" in test_name:
1146         nic = "x553-"
1147     else:
1148         nic = ""
1149     anchor += nic
1150
1151     if "64b" in test_name:
1152         framesize = "64b"
1153     elif "78b" in test_name:
1154         framesize = "78b"
1155     elif "imix" in test_name:
1156         framesize = "imix"
1157     elif "9000b" in test_name:
1158         framesize = "9000b"
1159     elif "1518b" in test_name:
1160         framesize = "1518b"
1161     elif "114b" in test_name:
1162         framesize = "114b"
1163     else:
1164         framesize = ""
1165     anchor += framesize + '-'
1166
1167     if "1t1c" in test_name:
1168         anchor += "1t1c"
1169     elif "2t2c" in test_name:
1170         anchor += "2t2c"
1171     elif "4t4c" in test_name:
1172         anchor += "4t4c"
1173     elif "2t1c" in test_name:
1174         anchor += "2t1c"
1175     elif "4t2c" in test_name:
1176         anchor += "4t2c"
1177     elif "8t4c" in test_name:
1178         anchor += "8t4c"
1179
1180     return url + file_name + '-' + testbed + '-' + nic + framesize + \
1181         feature.replace("-int", "").replace("-tnl", "") + anchor + feature
1182
1183
1184 def table_performance_trending_dashboard_html(table, input_data):
1185     """Generate the table(s) with algorithm:
1186     table_performance_trending_dashboard_html specified in the specification
1187     file.
1188
1189     :param table: Table to generate.
1190     :param input_data: Data to process.
1191     :type table: dict
1192     :type input_data: InputData
1193     """
1194
1195     testbed = table.get("testbed", None)
1196     if testbed is None:
1197         logging.error("The testbed is not defined for the table '{0}'.".
1198                       format(table.get("title", "")))
1199         return
1200
1201     logging.info("  Generating the table {0} ...".
1202                  format(table.get("title", "")))
1203
1204     try:
1205         with open(table["input-file"], 'rb') as csv_file:
1206             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1207             csv_lst = list(csv_content)
1208     except KeyError:
1209         logging.warning("The input file is not defined.")
1210         return
1211     except csv.Error as err:
1212         logging.warning("Unable to process the file '{0}'.\n{1}".
1213                         format(table["input-file"], err))
1214         return
1215
1216     # Table:
1217     dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
1218
1219     # Table header:
1220     tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
1221     for idx, item in enumerate(csv_lst[0]):
1222         alignment = "left" if idx == 0 else "center"
1223         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
1224         th.text = item
1225
1226     # Rows:
1227     colors = {"regression": ("#ffcccc", "#ff9999"),
1228               "progression": ("#c6ecc6", "#9fdf9f"),
1229               "normal": ("#e9f1fb", "#d4e4f7")}
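    # Pick the row colour: a non-zero value in column 4 marks the row as a
    # regression, a non-zero value in column 5 as a progression; the two
    # shades of each colour alternate between consecutive rows.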
1230     for r_idx, row in enumerate(csv_lst[1:]):
1231         if int(row[4]):
1232             color = "regression"
1233         elif int(row[5]):
1234             color = "progression"
1235         else:
1236             color = "normal"
1237         background = colors[color][r_idx % 2]
1238         tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
1239
1240         # Columns:
1241         for c_idx, item in enumerate(row):
1242             alignment = "left" if c_idx == 0 else "center"
1243             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
1244             # Name:
1245             if c_idx == 0:
1246                 url = _generate_url("../trending/", testbed, item)
1247                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
1248                 ref.text = item
1249             else:
1250                 td.text = item
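    # Wrap the generated table in an rST ".. raw:: html" directive so it is
    # rendered as raw HTML in the generated documentation.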
1251     try:
1252         with open(table["output-file"], 'w') as html_file:
1253             logging.info("    Writing file: '{0}'".format(table["output-file"]))
1254             html_file.write(".. raw:: html\n\n\t")
1255             html_file.write(ET.tostring(dashboard))
1256             html_file.write("\n\t<p><br><br></p>\n")
1257     except KeyError:
1258         logging.warning("The output file is not defined.")
1259         return
1260
1261
1262 def table_last_failed_tests(table, input_data):
1263     """Generate the table(s) with algorithm: table_last_failed_tests
1264     specified in the specification file.
1265
1266     :param table: Table to generate.
1267     :param input_data: Data to process.
1268     :type table: pandas.Series
1269     :type input_data: InputData
1270     """
1271
1272     logging.info("  Generating the table {0} ...".
1273                  format(table.get("title", "")))
1274
1275     # Transform the data
1276     logging.info("    Creating the data set for the {0} '{1}'.".
1277                  format(table.get("type", ""), table.get("title", "")))
1278     data = input_data.filter_data(table, continue_on_error=True)
1279
1280     if data is None or data.empty:
1281         logging.warning("    No data for the {0} '{1}'.".
1282                         format(table.get("type", ""), table.get("title", "")))
1283         return
1284
1285     tbl_list = list()
1286     for job, builds in table["data"].items():
1287         for build in builds:
1288             build = str(build)
1289             try:
1290                 version = input_data.metadata(job, build).get("version", "")
1291             except KeyError:
1292                 logging.error("Data for {job}: {build} is not present.".
1293                               format(job=job, build=build))
1294                 return
1295             tbl_list.append(build)
1296             tbl_list.append(version)
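            # List only the tests which failed in this build; the NIC code
            # parsed from the parent suite name is prepended to the test name.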
1297             for tst_name, tst_data in data[job][build].iteritems():
1298                 if tst_data["status"] != "FAIL":
1299                     continue
1300                 groups = re.search(REGEX_NIC, tst_data["parent"])
1301                 if not groups:
1302                     continue
1303                 nic = groups.group(0)
1304                 tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))
1305
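    # The output is a plain text file listing, for each build, the build
    # number, the VPP version, and the failed tests (one per line).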
1306     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1307     logging.info("    Writing file: '{0}'".format(file_name))
1308     with open(file_name, "w") as file_handler:
1309         for test in tbl_list:
1310             file_handler.write(test + '\n')
1311
1312
1313 def table_failed_tests(table, input_data):
1314     """Generate the table(s) with algorithm: table_failed_tests
1315     specified in the specification file.
1316
1317     :param table: Table to generate.
1318     :param input_data: Data to process.
1319     :type table: pandas.Series
1320     :type input_data: InputData
1321     """
1322
1323     logging.info("  Generating the table {0} ...".
1324                  format(table.get("title", "")))
1325
1326     # Transform the data
1327     logging.info("    Creating the data set for the {0} '{1}'.".
1328                  format(table.get("type", ""), table.get("title", "")))
1329     data = input_data.filter_data(table, continue_on_error=True)
1330
1331     # Prepare the header of the tables
1332     header = ["Test Case",
1333               "Failures [#]",
1334               "Last Failure [Time]",
1335               "Last Failure [VPP-Build-Id]",
1336               "Last Failure [CSIT-Job-Build-Id]"]
1337
1338     # Generate the data for the table according to the model in the table
1339     # specification
1340
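    # Only failures recorded within the configured time window (in days,
    # 7 by default) are taken into account.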
1341     now = dt.utcnow()
1342     timeperiod = timedelta(days=int(table.get("window", 7)))
1343
1344     tbl_dict = dict()
1345     for job, builds in table["data"].items():
1346         for build in builds:
1347             build = str(build)
1348             for tst_name, tst_data in data[job][build].iteritems():
1349                 if tst_name.lower() in table.get("ignore-list", list()):
1350                     continue
1351                 if tbl_dict.get(tst_name, None) is None:
1352                     groups = re.search(REGEX_NIC, tst_data["parent"])
1353                     if not groups:
1354                         continue
1355                     nic = groups.group(0)
1356                     tbl_dict[tst_name] = {
1357                         "name": "{0}-{1}".format(nic, tst_data["name"]),
1358                         "data": OrderedDict()}
1359                 try:
1360                     generated = input_data.metadata(job, build).\
1361                         get("generated", "")
1362                     if not generated:
1363                         continue
1364                     then = dt.strptime(generated, "%Y%m%d %H:%M")
1365                     if (now - then) <= timeperiod:
1366                         tbl_dict[tst_name]["data"][build] = (
1367                             tst_data["status"],
1368                             generated,
1369                             input_data.metadata(job, build).get("version", ""),
1370                             build)
1371                 except (TypeError, KeyError) as err:
1372                     logging.warning("tst_name: {} - err: {}".
1373                                     format(tst_name, repr(err)))
1374
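    # For each test, count its failures within the window and keep the date,
    # VPP version and CSIT build of the last recorded failure.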
1375     max_fails = 0
1376     tbl_lst = list()
1377     for tst_data in tbl_dict.values():
1378         fails_nr = 0
1379         for val in tst_data["data"].values():
1380             if val[0] == "FAIL":
1381                 fails_nr += 1
1382                 fails_last_date = val[1]
1383                 fails_last_vpp = val[2]
1384                 fails_last_csit = val[3]
1385         if fails_nr:
1386             max_fails = max(max_fails, fails_nr)
1387             tbl_lst.append([tst_data["name"],
1388                             fails_nr,
1389                             fails_last_date,
1390                             fails_last_vpp,
1391                             "mrr-daily-build-{0}".format(fails_last_csit)])
1392
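    # Sort by the date of the last failure (newest first), then group the
    # rows by the number of failures in descending order; within each group
    # the date ordering is preserved.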
1393     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1394     tbl_sorted = list()
1395     for nrf in range(max_fails, -1, -1):
1396         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1397         tbl_sorted.extend(tbl_fails)
1398     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1399
1400     logging.info("    Writing file: '{0}'".format(file_name))
1401     with open(file_name, "w") as file_handler:
1402         file_handler.write(",".join(header) + "\n")
1403         for test in tbl_sorted:
1404             file_handler.write(",".join([str(item) for item in test]) + '\n')
1405
1406     txt_file_name = "{0}.txt".format(table["output-file"])
1407     logging.info("    Writing file: '{0}'".format(txt_file_name))
1408     convert_csv_to_pretty_txt(file_name, txt_file_name)
1409
1410
1411 def table_failed_tests_html(table, input_data):
1412     """Generate the table(s) with algorithm: table_failed_tests_html
1413     specified in the specification file.
1414
1415     :param table: Table to generate.
1416     :param input_data: Data to process.
1417     :type table: pandas.Series
1418     :type input_data: InputData
1419     """
1420
1421     testbed = table.get("testbed", None)
1422     if testbed is None:
1423         logging.error("The testbed is not defined for the table '{0}'.".
1424                       format(table.get("title", "")))
1425         return
1426
1427     logging.info("  Generating the table {0} ...".
1428                  format(table.get("title", "")))
1429
1430     try:
1431         with open(table["input-file"], 'rb') as csv_file:
1432             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1433             csv_lst = list(csv_content)
1434     except KeyError:
1435         logging.warning("The input file is not defined.")
1436         return
1437     except csv.Error as err:
1438         logging.warning("Unable to process the file '{0}'.\n{1}".
1439                         format(table["input-file"], err))
1440         return
1441
1442     # Table:
1443     failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
1444
1445     # Table header:
1446     tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
1447     for idx, item in enumerate(csv_lst[0]):
1448         alignment = "left" if idx == 0 else "center"
1449         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
1450         th.text = item
1451
1452     # Rows:
1453     colors = ("#e9f1fb", "#d4e4f7")
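    # Alternate the two background shades between consecutive rows.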
1454     for r_idx, row in enumerate(csv_lst[1:]):
1455         background = colors[r_idx % 2]
1456         tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
1457
1458         # Columns:
1459         for c_idx, item in enumerate(row):
1460             alignment = "left" if c_idx == 0 else "center"
1461             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
1462             # Name:
1463             if c_idx == 0:
1464                 url = _generate_url("../trending/", testbed, item)
1465                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
1466                 ref.text = item
1467             else:
1468                 td.text = item
1469     try:
1470         with open(table["output-file"], 'w') as html_file:
1471             logging.info("    Writing file: '{0}'".format(table["output-file"]))
1472             html_file.write(".. raw:: html\n\n\t")
1473             html_file.write(ET.tostring(failed_tests))
1474             html_file.write("\n\t<p><br><br></p>\n")
1475     except KeyError:
1476         logging.warning("The output file is not defined.")
1477         return