CSIT-1590: Performance comparison analysis
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
28
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30     convert_csv_to_pretty_txt, relative_change_stdev
31
32
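# Matches the NIC part of a suite/test name. Illustrative matches (typical
# CSIT naming, shown here as examples only): "10ge2p1x520", "25ge2p1xxv710",
# "40ge2p1xl710".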
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
34
35
36 def generate_tables(spec, data):
37     """Generate all tables specified in the specification file.
38
39     :param spec: Specification read from the specification file.
40     :param data: Data to process.
41     :type spec: Specification
42     :type data: InputData
43     """
44
45     logging.info("Generating the tables ...")
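    # Each table entry of the specification selects one of the functions in
    # this module by name; a minimal, illustrative entry (key values are
    # assumptions, only the keys are taken from the code below) might be:
    #
    #   - type: "table"
    #     title: "VPP performance changes"
    #     algorithm: "table_performance_comparison"
    #     output-file: ".../performance-changes"
    #
    # eval() resolves the "algorithm" string to the function object and calls
    # it with the table specification and the input data.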
46     for table in spec.tables:
47         try:
48             eval(table["algorithm"])(table, data)
49         except NameError as err:
50             logging.error("The algorithm '{alg}' is probably not defined: {err}".
51                           format(alg=table["algorithm"], err=repr(err)))
52     logging.info("Done.")
53
54
55 def table_details(table, input_data):
56     """Generate the table(s) with algorithm: table_details
57     specified in the specification file.
58
59     :param table: Table to generate.
60     :param input_data: Data to process.
61     :type table: pandas.Series
62     :type input_data: InputData
63     """
64
65     logging.info("  Generating the table {0} ...".
66                  format(table.get("title", "")))
67
68     # Transform the data
69     logging.info("    Creating the data set for the {0} '{1}'.".
70                  format(table.get("type", ""), table.get("title", "")))
71     data = input_data.filter_data(table)
72
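    # Each entry of table["columns"] is expected to provide a "title" used in
    # the header and a "data" selector whose second word names the field of
    # the test item to print, e.g. (illustrative values):
    #
    #   - title: "Documentation"
    #     data: "data doc"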
73     # Prepare the header of the tables
74     header = list()
75     for column in table["columns"]:
76         header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
77
78     # Generate the data for the table according to the model in the table
79     # specification
80     job = table["data"].keys()[0]
81     build = str(table["data"][job][0])
82     try:
83         suites = input_data.suites(job, build)
84     except KeyError:
85         logging.error("    No data available. The table will not be generated.")
86         return
87
88     for suite_longname, suite in suites.iteritems():
89         # Generate data
90         suite_name = suite["name"]
91         table_lst = list()
92         for test in data[job][build].keys():
93             if data[job][build][test]["parent"] in suite_name:
94                 row_lst = list()
95                 for column in table["columns"]:
96                     try:
97                         col_data = str(data[job][build][test][column["data"].
98                                        split(" ")[1]]).replace('"', '""')
99                         if column["data"].split(" ")[1] in ("conf-history",
100                                                             "show-run"):
101                             col_data = replace(col_data, " |br| ", "",
102                                                maxreplace=1)
103                             col_data = " |prein| {0} |preout| ".\
104                                 format(col_data[:-5])
105                         row_lst.append('"{0}"'.format(col_data))
106                     except KeyError:
107                         row_lst.append("No data")
108                 table_lst.append(row_lst)
109
110         # Write the data to file
111         if table_lst:
112             file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
113                                             table["output-file-ext"])
114             logging.info("      Writing file: '{}'".format(file_name))
115             with open(file_name, "w") as file_handler:
116                 file_handler.write(",".join(header) + "\n")
117                 for item in table_lst:
118                     file_handler.write(",".join(item) + "\n")
119
120     logging.info("  Done.")
121
122
123 def table_merged_details(table, input_data):
124     """Generate the table(s) with algorithm: table_merged_details
125     specified in the specification file.
126
127     :param table: Table to generate.
128     :param input_data: Data to process.
129     :type table: pandas.Series
130     :type input_data: InputData
131     """
132
133     logging.info("  Generating the table {0} ...".
134                  format(table.get("title", "")))
135
136     # Transform the data
137     logging.info("    Creating the data set for the {0} '{1}'.".
138                  format(table.get("type", ""), table.get("title", "")))
139     data = input_data.filter_data(table)
140     data = input_data.merge_data(data)
141     data.sort_index(inplace=True)
142
143     logging.info("    Creating the data set for the {0} '{1}'.".
144                  format(table.get("type", ""), table.get("title", "")))
145     suites = input_data.filter_data(table, data_set="suites")
146     suites = input_data.merge_data(suites)
147
148     # Prepare the header of the tables
149     header = list()
150     for column in table["columns"]:
151         header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
152
153     for _, suite in suites.iteritems():
154         # Generate data
155         suite_name = suite["name"]
156         table_lst = list()
157         for test in data.keys():
158             if data[test]["parent"] in suite_name:
159                 row_lst = list()
160                 for column in table["columns"]:
161                     try:
162                         col_data = str(data[test][column["data"].
163                                        split(" ")[1]]).replace('"', '""')
164                         col_data = replace(col_data, "No Data",
165                                            "Not Captured     ")
166                         if column["data"].split(" ")[1] in ("conf-history",
167                                                             "show-run"):
168                             col_data = replace(col_data, " |br| ", "",
169                                                maxreplace=1)
170                             col_data = " |prein| {0} |preout| ".\
171                                 format(col_data[:-5])
172                         row_lst.append('"{0}"'.format(col_data))
173                     except KeyError:
174                         row_lst.append('"Not captured"')
175                 table_lst.append(row_lst)
176
177         # Write the data to file
178         if table_lst:
179             file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
180                                             table["output-file-ext"])
181             logging.info("      Writing file: '{}'".format(file_name))
182             with open(file_name, "w") as file_handler:
183                 file_handler.write(",".join(header) + "\n")
184                 for item in table_lst:
185                     file_handler.write(",".join(item) + "\n")
186
187     logging.info("  Done.")
188
189
190 def table_performance_comparison(table, input_data):
191     """Generate the table(s) with algorithm: table_performance_comparison
192     specified in the specification file.
193
194     :param table: Table to generate.
195     :param input_data: Data to process.
196     :type table: pandas.Series
197     :type input_data: InputData
198     """
199
200     logging.info("  Generating the table {0} ...".
201                  format(table.get("title", "")))
202
203     # Transform the data
204     logging.info("    Creating the data set for the {0} '{1}'.".
205                  format(table.get("type", ""), table.get("title", "")))
206     data = input_data.filter_data(table, continue_on_error=True)
207
208     # Prepare the header of the tables
209     try:
210         header = ["Test case", ]
211
212         if table["include-tests"] == "MRR":
213             hdr_param = "Receive Rate"
214         else:
215             hdr_param = "Throughput"
216
217         history = table.get("history", None)
218         if history:
219             for item in history:
220                 header.extend(
221                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
222                      "{0} Stdev [Mpps]".format(item["title"])])
223         header.extend(
224             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
225              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
226              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
227              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
228              "Delta [%]"])
229         header_str = ",".join(header) + "\n"
230     except (AttributeError, KeyError) as err:
231         logging.error("The model is invalid, missing parameter: {0}".
232                       format(err))
233         return
234
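    # An illustrative (assumed) shape of the "reference" and "compare" parts
    # of this table's specification; job names and build numbers below are
    # hypothetical:
    #
    #   reference:
    #     title: "rls1901"
    #     data:
    #       some-performance-job: [1, 2, 3]
    #   compare:
    #     title: "rls1904"
    #     data:
    #       some-performance-job: [4, 5, 6]
    #
    # The keys under "data" are job names as present in the input data, the
    # values are lists of build numbers; the optional "history" list carries
    # the same "title"/"data" structure per item.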
235     # Prepare data for the table:
236     tbl_dict = dict()
237     for job, builds in table["reference"]["data"].items():
238         for build in builds:
239             for tst_name, tst_data in data[job][str(build)].iteritems():
240                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
241                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
242                     replace("-ndrdisc", "").replace("-pdr", "").\
243                     replace("-ndr", "").\
244                     replace("1t1c", "1c").replace("2t1c", "1c").\
245                     replace("2t2c", "2c").replace("4t2c", "2c").\
246                     replace("4t4c", "4c").replace("8t4c", "4c")
247                 if "across topologies" in table["title"].lower():
248                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
249                 if tbl_dict.get(tst_name_mod, None) is None:
250                     groups = re.search(REGEX_NIC, tst_data["parent"])
251                     nic = groups.group(0) if groups else ""
252                     name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
253                                                           split("-")[:-1]))
254                     if "across testbeds" in table["title"].lower() or \
255                             "across topologies" in table["title"].lower():
256                         name = name.\
257                             replace("1t1c", "1c").replace("2t1c", "1c").\
258                             replace("2t2c", "2c").replace("4t2c", "2c").\
259                             replace("4t4c", "4c").replace("8t4c", "4c")
260                     tbl_dict[tst_name_mod] = {"name": name,
261                                               "ref-data": list(),
262                                               "cmp-data": list()}
263                 try:
264                     # TODO: Re-work when NDRPDRDISC tests are not used
265                     if table["include-tests"] == "MRR":
266                         tbl_dict[tst_name_mod]["ref-data"]. \
267                             append(tst_data["result"]["receive-rate"].avg)
268                     elif table["include-tests"] == "PDR":
269                         if tst_data["type"] == "PDR":
270                             tbl_dict[tst_name_mod]["ref-data"]. \
271                                 append(tst_data["throughput"]["value"])
272                         elif tst_data["type"] == "NDRPDR":
273                             tbl_dict[tst_name_mod]["ref-data"].append(
274                                 tst_data["throughput"]["PDR"]["LOWER"])
275                     elif table["include-tests"] == "NDR":
276                         if tst_data["type"] == "NDR":
277                             tbl_dict[tst_name_mod]["ref-data"]. \
278                                 append(tst_data["throughput"]["value"])
279                         elif tst_data["type"] == "NDRPDR":
280                             tbl_dict[tst_name_mod]["ref-data"].append(
281                                 tst_data["throughput"]["NDR"]["LOWER"])
282                     else:
283                         continue
284                 except TypeError:
285                     pass  # No data in output.xml for this test
286
287     for job, builds in table["compare"]["data"].items():
288         for build in builds:
289             for tst_name, tst_data in data[job][str(build)].iteritems():
290                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
291                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
292                     replace("-ndrdisc", "").replace("-pdr", ""). \
293                     replace("-ndr", "").\
294                     replace("1t1c", "1c").replace("2t1c", "1c").\
295                     replace("2t2c", "2c").replace("4t2c", "2c").\
296                     replace("4t4c", "4c").replace("8t4c", "4c")
297                 if "across topologies" in table["title"].lower():
298                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
299                 if tbl_dict.get(tst_name_mod, None) is None:
300                     groups = re.search(REGEX_NIC, tst_data["parent"])
301                     nic = groups.group(0) if groups else ""
302                     name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
303                                                           split("-")[:-1]))
304                     if "across testbeds" in table["title"].lower() or \
305                             "across topologies" in table["title"].lower():
306                         name = name.\
307                             replace("1t1c", "1c").replace("2t1c", "1c").\
308                             replace("2t2c", "2c").replace("4t2c", "2c").\
309                             replace("4t4c", "4c").replace("8t4c", "4c")
310                     tbl_dict[tst_name_mod] = {"name": name,
311                                               "ref-data": list(),
312                                               "cmp-data": list()}
313                 try:
314                     # TODO: Re-work when NDRPDRDISC tests are not used
315                     if table["include-tests"] == "MRR":
316                         tbl_dict[tst_name_mod]["cmp-data"]. \
317                             append(tst_data["result"]["receive-rate"].avg)
318                     elif table["include-tests"] == "PDR":
319                         if tst_data["type"] == "PDR":
320                             tbl_dict[tst_name_mod]["cmp-data"]. \
321                                 append(tst_data["throughput"]["value"])
322                         elif tst_data["type"] == "NDRPDR":
323                             tbl_dict[tst_name_mod]["cmp-data"].append(
324                                 tst_data["throughput"]["PDR"]["LOWER"])
325                     elif table["include-tests"] == "NDR":
326                         if tst_data["type"] == "NDR":
327                             tbl_dict[tst_name_mod]["cmp-data"]. \
328                                 append(tst_data["throughput"]["value"])
329                         elif tst_data["type"] == "NDRPDR":
330                             tbl_dict[tst_name_mod]["cmp-data"].append(
331                                 tst_data["throughput"]["NDR"]["LOWER"])
332                     else:
333                         continue
334                 except (KeyError, TypeError):
335                     pass
336     if history:
337         for item in history:
338             for job, builds in item["data"].items():
339                 for build in builds:
340                     for tst_name, tst_data in data[job][str(build)].iteritems():
341                         tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
342                             replace("-ndrpdr", "").replace("-pdrdisc", ""). \
343                             replace("-ndrdisc", "").replace("-pdr", ""). \
344                             replace("-ndr", "").\
345                             replace("1t1c", "1c").replace("2t1c", "1c").\
346                             replace("2t2c", "2c").replace("4t2c", "2c").\
347                             replace("4t4c", "4c").replace("8t4c", "4c")
348                         if "across topologies" in table["title"].lower():
349                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
350                         if tbl_dict.get(tst_name_mod, None) is None:
351                             continue
352                         if tbl_dict[tst_name_mod].get("history", None) is None:
353                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
354                         if tbl_dict[tst_name_mod]["history"].get(item["title"],
355                                                              None) is None:
356                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
357                                 list()
358                         try:
359                             # TODO: Re-work when NDRPDRDISC tests are not used
360                             if table["include-tests"] == "MRR":
361                                 tbl_dict[tst_name_mod]["history"][item["title"
362                                 ]].append(tst_data["result"]["receive-rate"].
363                                           avg)
364                             elif table["include-tests"] == "PDR":
365                                 if tst_data["type"] == "PDR":
366                                     tbl_dict[tst_name_mod]["history"][
367                                         item["title"]].\
368                                         append(tst_data["throughput"]["value"])
369                                 elif tst_data["type"] == "NDRPDR":
370                                     tbl_dict[tst_name_mod]["history"][item[
371                                         "title"]].append(tst_data["throughput"][
372                                         "PDR"]["LOWER"])
373                             elif table["include-tests"] == "NDR":
374                                 if tst_data["type"] == "NDR":
375                                     tbl_dict[tst_name_mod]["history"][
376                                         item["title"]].\
377                                         append(tst_data["throughput"]["value"])
378                                 elif tst_data["type"] == "NDRPDR":
379                                     tbl_dict[tst_name_mod]["history"][item[
380                                         "title"]].append(tst_data["throughput"][
381                                         "NDR"]["LOWER"])
382                             else:
383                                 continue
384                         except (TypeError, KeyError):
385                             pass
386
387     tbl_lst = list()
388     for tst_name in tbl_dict.keys():
389         item = [tbl_dict[tst_name]["name"], ]
390         if history:
391             if tbl_dict[tst_name].get("history", None) is not None:
392                 for hist_data in tbl_dict[tst_name]["history"].values():
393                     if hist_data:
394                         item.append(round(mean(hist_data) / 1000000, 2))
395                         item.append(round(stdev(hist_data) / 1000000, 2))
396                     else:
397                         item.extend([None, None])
398             else:
399                 item.extend([None, None])
400         data_t = tbl_dict[tst_name]["ref-data"]
401         if data_t:
402             item.append(round(mean(data_t) / 1000000, 2))
403             item.append(round(stdev(data_t) / 1000000, 2))
404         else:
405             item.extend([None, None])
406         data_t = tbl_dict[tst_name]["cmp-data"]
407         if data_t:
408             item.append(round(mean(data_t) / 1000000, 2))
409             item.append(round(stdev(data_t) / 1000000, 2))
410         else:
411             item.extend([None, None])
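        # At this point item[-4] holds the reference mean and item[-2] the
        # compare mean (both already scaled to Mpps); the appended delta is
        # their relative change in percent.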
412         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
413             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
414         else:
415             item.append(None)
416         if len(item) == len(header):
417             tbl_lst.append(item)
418
419     # Sort the table according to the relative change
420     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
421
422     # Generate csv tables:
423     csv_file = "{0}.csv".format(table["output-file"])
424     with open(csv_file, "w") as file_handler:
425         file_handler.write(header_str)
426         for test in tbl_lst:
427             file_handler.write(",".join([str(item) for item in test]) + "\n")
428
429     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
430
431
432 def table_performance_comparison_nic(table, input_data):
433     """Generate the table(s) with algorithm: table_performance_comparison_nic
434     specified in the specification file.
435
436     :param table: Table to generate.
437     :param input_data: Data to process.
438     :type table: pandas.Series
439     :type input_data: InputData
440     """
441
442     logging.info("  Generating the table {0} ...".
443                  format(table.get("title", "")))
444
445     # Transform the data
446     logging.info("    Creating the data set for the {0} '{1}'.".
447                  format(table.get("type", ""), table.get("title", "")))
448     data = input_data.filter_data(table, continue_on_error=True)
449
450     # Prepare the header of the tables
451     try:
452         header = ["Test case", ]
453
454         if table["include-tests"] == "MRR":
455             hdr_param = "Receive Rate"
456         else:
457             hdr_param = "Throughput"
458
459         history = table.get("history", None)
460         if history:
461             for item in history:
462                 header.extend(
463                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
464                      "{0} Stdev [Mpps]".format(item["title"])])
465         header.extend(
466             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
467              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
468              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
469              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
470              "Delta [%]"])
471         header_str = ",".join(header) + "\n"
472     except (AttributeError, KeyError) as err:
473         logging.error("The model is invalid, missing parameter: {0}".
474                       format(err))
475         return
476
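    # Same specification shape as in table_performance_comparison(), except
    # that "reference", "compare" and each "history" item also carry a "nic"
    # key (an illustrative value would be a NIC tag such as "NIC_Intel-X710")
    # which is matched against the tags of every test below.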
477     # Prepare data for the table:
478     tbl_dict = dict()
479     for job, builds in table["reference"]["data"].items():
480         for build in builds:
481             for tst_name, tst_data in data[job][str(build)].iteritems():
482                 if table["reference"]["nic"] not in tst_data["tags"]:
483                     continue
484                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
485                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
486                     replace("-ndrdisc", "").replace("-pdr", "").\
487                     replace("-ndr", "").\
488                     replace("1t1c", "1c").replace("2t1c", "1c").\
489                     replace("2t2c", "2c").replace("4t2c", "2c").\
490                     replace("4t4c", "4c").replace("8t4c", "4c")
491                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
492                 if "across topologies" in table["title"].lower():
493                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
494                 if tbl_dict.get(tst_name_mod, None) is None:
495                     name = "{0}".format("-".join(tst_data["name"].
496                                                  split("-")[:-1]))
497                     if "across testbeds" in table["title"].lower() or \
498                             "across topologies" in table["title"].lower():
499                         name = name.\
500                             replace("1t1c", "1c").replace("2t1c", "1c").\
501                             replace("2t2c", "2c").replace("4t2c", "2c").\
502                             replace("4t4c", "4c").replace("8t4c", "4c")
503                     tbl_dict[tst_name_mod] = {"name": name,
504                                               "ref-data": list(),
505                                               "cmp-data": list()}
506                 try:
507                     # TODO: Re-work when NDRPDRDISC tests are not used
508                     if table["include-tests"] == "MRR":
509                         tbl_dict[tst_name_mod]["ref-data"]. \
510                             append(tst_data["result"]["receive-rate"].avg)
511                     elif table["include-tests"] == "PDR":
512                         if tst_data["type"] == "PDR":
513                             tbl_dict[tst_name_mod]["ref-data"]. \
514                                 append(tst_data["throughput"]["value"])
515                         elif tst_data["type"] == "NDRPDR":
516                             tbl_dict[tst_name_mod]["ref-data"].append(
517                                 tst_data["throughput"]["PDR"]["LOWER"])
518                     elif table["include-tests"] == "NDR":
519                         if tst_data["type"] == "NDR":
520                             tbl_dict[tst_name_mod]["ref-data"]. \
521                                 append(tst_data["throughput"]["value"])
522                         elif tst_data["type"] == "NDRPDR":
523                             tbl_dict[tst_name_mod]["ref-data"].append(
524                                 tst_data["throughput"]["NDR"]["LOWER"])
525                     else:
526                         continue
527                 except TypeError:
528                     pass  # No data in output.xml for this test
529
530     for job, builds in table["compare"]["data"].items():
531         for build in builds:
532             for tst_name, tst_data in data[job][str(build)].iteritems():
533                 if table["compare"]["nic"] not in tst_data["tags"]:
534                     continue
535                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
536                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
537                     replace("-ndrdisc", "").replace("-pdr", ""). \
538                     replace("-ndr", "").\
539                     replace("1t1c", "1c").replace("2t1c", "1c").\
540                     replace("2t2c", "2c").replace("4t2c", "2c").\
541                     replace("4t4c", "4c").replace("8t4c", "4c")
542                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
543                 if "across topologies" in table["title"].lower():
544                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
545                 if tbl_dict.get(tst_name_mod, None) is None:
546                     name = "{0}".format("-".join(tst_data["name"].
547                                                  split("-")[:-1]))
548                     if "across testbeds" in table["title"].lower() or \
549                             "across topologies" in table["title"].lower():
550                         name = name.\
551                             replace("1t1c", "1c").replace("2t1c", "1c").\
552                             replace("2t2c", "2c").replace("4t2c", "2c").\
553                             replace("4t4c", "4c").replace("8t4c", "4c")
554                     tbl_dict[tst_name_mod] = {"name": name,
555                                               "ref-data": list(),
556                                               "cmp-data": list()}
557                 try:
558                     # TODO: Re-work when NDRPDRDISC tests are not used
559                     if table["include-tests"] == "MRR":
560                         tbl_dict[tst_name_mod]["cmp-data"]. \
561                             append(tst_data["result"]["receive-rate"].avg)
562                     elif table["include-tests"] == "PDR":
563                         if tst_data["type"] == "PDR":
564                             tbl_dict[tst_name_mod]["cmp-data"]. \
565                                 append(tst_data["throughput"]["value"])
566                         elif tst_data["type"] == "NDRPDR":
567                             tbl_dict[tst_name_mod]["cmp-data"].append(
568                                 tst_data["throughput"]["PDR"]["LOWER"])
569                     elif table["include-tests"] == "NDR":
570                         if tst_data["type"] == "NDR":
571                             tbl_dict[tst_name_mod]["cmp-data"]. \
572                                 append(tst_data["throughput"]["value"])
573                         elif tst_data["type"] == "NDRPDR":
574                             tbl_dict[tst_name_mod]["cmp-data"].append(
575                                 tst_data["throughput"]["NDR"]["LOWER"])
576                     else:
577                         continue
578                 except (KeyError, TypeError):
579                     pass
580
581     if history:
582         for item in history:
583             for job, builds in item["data"].items():
584                 for build in builds:
585                     for tst_name, tst_data in data[job][str(build)].iteritems():
586                         if item["nic"] not in tst_data["tags"]:
587                             continue
588                         tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
589                             replace("-ndrpdr", "").replace("-pdrdisc", ""). \
590                             replace("-ndrdisc", "").replace("-pdr", ""). \
591                             replace("-ndr", "").\
592                             replace("1t1c", "1c").replace("2t1c", "1c").\
593                             replace("2t2c", "2c").replace("4t2c", "2c").\
594                             replace("4t4c", "4c").replace("8t4c", "4c")
595                         tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
596                         if "across topologies" in table["title"].lower():
597                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
598                         if tbl_dict.get(tst_name_mod, None) is None:
599                             continue
600                         if tbl_dict[tst_name_mod].get("history", None) is None:
601                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
602                         if tbl_dict[tst_name_mod]["history"].get(item["title"],
603                                                              None) is None:
604                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
605                                 list()
606                         try:
607                             # TODO: Re-work when NDRPDRDISC tests are not used
608                             if table["include-tests"] == "MRR":
609                                 tbl_dict[tst_name_mod]["history"][item["title"
610                                 ]].append(tst_data["result"]["receive-rate"].
611                                           avg)
612                             elif table["include-tests"] == "PDR":
613                                 if tst_data["type"] == "PDR":
614                                     tbl_dict[tst_name_mod]["history"][
615                                         item["title"]].\
616                                         append(tst_data["throughput"]["value"])
617                                 elif tst_data["type"] == "NDRPDR":
618                                     tbl_dict[tst_name_mod]["history"][item[
619                                         "title"]].append(tst_data["throughput"][
620                                         "PDR"]["LOWER"])
621                             elif table["include-tests"] == "NDR":
622                                 if tst_data["type"] == "NDR":
623                                     tbl_dict[tst_name_mod]["history"][
624                                         item["title"]].\
625                                         append(tst_data["throughput"]["value"])
626                                 elif tst_data["type"] == "NDRPDR":
627                                     tbl_dict[tst_name_mod]["history"][item[
628                                         "title"]].append(tst_data["throughput"][
629                                         "NDR"]["LOWER"])
630                             else:
631                                 continue
632                         except (TypeError, KeyError):
633                             pass
634
635     tbl_lst = list()
636     for tst_name in tbl_dict.keys():
637         item = [tbl_dict[tst_name]["name"], ]
638         if history:
639             if tbl_dict[tst_name].get("history", None) is not None:
640                 for hist_data in tbl_dict[tst_name]["history"].values():
641                     if hist_data:
642                         item.append(round(mean(hist_data) / 1000000, 2))
643                         item.append(round(stdev(hist_data) / 1000000, 2))
644                     else:
645                         item.extend([None, None])
646             else:
647                 item.extend([None, None])
648         data_t = tbl_dict[tst_name]["ref-data"]
649         if data_t:
650             item.append(round(mean(data_t) / 1000000, 2))
651             item.append(round(stdev(data_t) / 1000000, 2))
652         else:
653             item.extend([None, None])
654         data_t = tbl_dict[tst_name]["cmp-data"]
655         if data_t:
656             item.append(round(mean(data_t) / 1000000, 2))
657             item.append(round(stdev(data_t) / 1000000, 2))
658         else:
659             item.extend([None, None])
660         if "dot1q" in tbl_dict[tst_name]["name"]:
661             item.append("Changed methodology")
662         elif item[-4] is not None and item[-2] is not None and item[-4] != 0:
663             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
664         else:
665             item.append("n/a")
666         if len(item) == len(header):
667             tbl_lst.append(item)
668
669     # Sort the table according to the relative change
670     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
671
672     # Generate csv tables:
673     csv_file = "{0}.csv".format(table["output-file"])
674     with open(csv_file, "w") as file_handler:
675         file_handler.write(header_str)
676         for test in tbl_lst:
677             file_handler.write(",".join([str(item) for item in test]) + "\n")
678
679     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
680
681
682 def table_nics_comparison(table, input_data):
683     """Generate the table(s) with algorithm: table_nics_comparison
684     specified in the specification file.
685
686     :param table: Table to generate.
687     :param input_data: Data to process.
688     :type table: pandas.Series
689     :type input_data: InputData
690     """
691
692     logging.info("  Generating the table {0} ...".
693                  format(table.get("title", "")))
694
695     # Transform the data
696     logging.info("    Creating the data set for the {0} '{1}'.".
697                  format(table.get("type", ""), table.get("title", "")))
698     data = input_data.filter_data(table, continue_on_error=True)
699
700     # Prepare the header of the tables
701     try:
702         header = ["Test case", ]
703
704         if table["include-tests"] == "MRR":
705             hdr_param = "Receive Rate"
706         else:
707             hdr_param = "Throughput"
708
709         header.extend(
710             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
711              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
712              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
713              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
714              "Delta [%]"])
715         header_str = ",".join(header) + "\n"
716     except (AttributeError, KeyError) as err:
717         logging.error("The model is invalid, missing parameter: {0}".
718                       format(err))
719         return
720
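    # Here one set of jobs/builds under table["data"] feeds both sides of the
    # comparison; the "reference" and "compare" parts of the specification
    # only need a "title" and a "nic" tag (illustrative value:
    # "NIC_Intel-X710") used to split the results per NIC.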
721     # Prepare data for the table:
722     tbl_dict = dict()
723     for job, builds in table["data"].items():
724         for build in builds:
725             for tst_name, tst_data in data[job][str(build)].iteritems():
726                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
727                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
728                     replace("-ndrdisc", "").replace("-pdr", "").\
729                     replace("-ndr", "").\
730                     replace("1t1c", "1c").replace("2t1c", "1c").\
731                     replace("2t2c", "2c").replace("4t2c", "2c").\
732                     replace("4t4c", "4c").replace("8t4c", "4c")
733                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
734                 if tbl_dict.get(tst_name_mod, None) is None:
735                     name = "-".join(tst_data["name"].split("-")[:-1])
736                     tbl_dict[tst_name_mod] = {"name": name,
737                                               "ref-data": list(),
738                                               "cmp-data": list()}
739                 try:
740                     if table["include-tests"] == "MRR":
741                         result = tst_data["result"]["receive-rate"].avg
742                     elif table["include-tests"] == "PDR":
743                         result = tst_data["throughput"]["PDR"]["LOWER"]
744                     elif table["include-tests"] == "NDR":
745                         result = tst_data["throughput"]["NDR"]["LOWER"]
746                     else:
747                         result = None
748
749                     if result:
750                         if table["reference"]["nic"] in tst_data["tags"]:
751                             tbl_dict[tst_name_mod]["ref-data"].append(result)
752                         elif table["compare"]["nic"] in tst_data["tags"]:
753                             tbl_dict[tst_name_mod]["cmp-data"].append(result)
754                 except (TypeError, KeyError) as err:
755                     logging.debug("No data for {0}".format(tst_name))
756                     logging.debug(repr(err))
757                     # No data in output.xml for this test
758
759     tbl_lst = list()
760     for tst_name in tbl_dict.keys():
761         item = [tbl_dict[tst_name]["name"], ]
762         data_t = tbl_dict[tst_name]["ref-data"]
763         if data_t:
764             item.append(round(mean(data_t) / 1000000, 2))
765             item.append(round(stdev(data_t) / 1000000, 2))
766         else:
767             item.extend([None, None])
768         data_t = tbl_dict[tst_name]["cmp-data"]
769         if data_t:
770             item.append(round(mean(data_t) / 1000000, 2))
771             item.append(round(stdev(data_t) / 1000000, 2))
772         else:
773             item.extend([None, None])
774         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
775             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
776         if len(item) == len(header):
777             tbl_lst.append(item)
778
779     # Sort the table according to the relative change
780     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
781
782     # Generate csv tables:
783     csv_file = "{0}.csv".format(table["output-file"])
784     with open(csv_file, "w") as file_handler:
785         file_handler.write(header_str)
786         for test in tbl_lst:
787             file_handler.write(",".join([str(item) for item in test]) + "\n")
788
789     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
790
791
792 def table_soak_vs_ndr(table, input_data):
793     """Generate the table(s) with algorithm: table_soak_vs_ndr
794     specified in the specification file.
795
796     :param table: Table to generate.
797     :param input_data: Data to process.
798     :type table: pandas.Series
799     :type input_data: InputData
800     """
801
802     logging.info("  Generating the table {0} ...".
803                  format(table.get("title", "")))
804
805     # Transform the data
806     logging.info("    Creating the data set for the {0} '{1}'.".
807                  format(table.get("type", ""), table.get("title", "")))
808     data = input_data.filter_data(table, continue_on_error=True)
809
810     # Prepare the header of the table
811     try:
812         header = [
813             "Test case",
814             "{0} Throughput [Mpps]".format(table["reference"]["title"]),
815             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
816             "{0} Throughput [Mpps]".format(table["compare"]["title"]),
817             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
818             "Delta [%]", "Stdev of delta [%]"]
819         header_str = ",".join(header) + "\n"
820     except (AttributeError, KeyError) as err:
821         logging.error("The model is invalid, missing parameter: {0}".
822                       format(err))
823         return
824
825     # Create a list of available SOAK test results:
826     tbl_dict = dict()
827     for job, builds in table["compare"]["data"].items():
828         for build in builds:
829             for tst_name, tst_data in data[job][str(build)].iteritems():
830                 if tst_data["type"] == "SOAK":
831                     tst_name_mod = tst_name.replace("-soak", "")
832                     if tbl_dict.get(tst_name_mod, None) is None:
833                         groups = re.search(REGEX_NIC, tst_data["parent"])
834                         nic = groups.group(0) if groups else ""
835                         name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
836                                                               split("-")[:-1]))
837                         tbl_dict[tst_name_mod] = {
838                             "name": name,
839                             "ref-data": list(),
840                             "cmp-data": list()
841                         }
842                     try:
843                         tbl_dict[tst_name_mod]["cmp-data"].append(
844                             tst_data["throughput"]["LOWER"])
845                     except (KeyError, TypeError):
846                         pass
847     tests_lst = tbl_dict.keys()
848
849     # Add corresponding NDR test results:
850     for job, builds in table["reference"]["data"].items():
851         for build in builds:
852             for tst_name, tst_data in data[job][str(build)].iteritems():
853                 tst_name_mod = tst_name.replace("-ndrpdr", "").\
854                     replace("-mrr", "")
855                 if tst_name_mod in tests_lst:
856                     try:
857                         if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
858                             if table["include-tests"] == "MRR":
859                                 result = tst_data["result"]["receive-rate"].avg
860                             elif table["include-tests"] == "PDR":
861                                 result = tst_data["throughput"]["PDR"]["LOWER"]
862                             elif table["include-tests"] == "NDR":
863                                 result = tst_data["throughput"]["NDR"]["LOWER"]
864                             else:
865                                 result = None
866                             if result is not None:
867                                 tbl_dict[tst_name_mod]["ref-data"].append(
868                                     result)
869                     except (KeyError, TypeError):
870                         continue
871
872     tbl_lst = list()
873     for tst_name in tbl_dict.keys():
874         item = [tbl_dict[tst_name]["name"], ]
875         data_r = tbl_dict[tst_name]["ref-data"]
876         if data_r:
877             data_r_mean = mean(data_r)
878             item.append(round(data_r_mean / 1000000, 2))
879             data_r_stdev = stdev(data_r)
880             item.append(round(data_r_stdev / 1000000, 2))
881         else:
882             data_r_mean = None
883             data_r_stdev = None
884             item.extend([None, None])
885         data_c = tbl_dict[tst_name]["cmp-data"]
886         if data_c:
887             data_c_mean = mean(data_c)
888             item.append(round(data_c_mean / 1000000, 2))
889             data_c_stdev = stdev(data_c)
890             item.append(round(data_c_stdev / 1000000, 2))
891         else:
892             data_c_mean = None
893             data_c_stdev = None
894             item.extend([None, None])
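        # The delta is the relative change of the compare (soak) mean against
        # the reference (NDR/PDR/MRR) mean in percent; relative_change_stdev()
        # also derives a stdev of that delta from the two input stdevs.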
895         if data_r_mean and data_c_mean:
896             delta, d_stdev = relative_change_stdev(
897                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
898             item.append(round(delta, 2))
899             item.append(round(d_stdev, 2))
900             tbl_lst.append(item)
901
902     # Sort the table according to the relative change
903     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
904
905     # Generate csv tables:
906     csv_file = "{0}.csv".format(table["output-file"])
907     with open(csv_file, "w") as file_handler:
908         file_handler.write(header_str)
909         for test in tbl_lst:
910             file_handler.write(",".join([str(item) for item in test]) + "\n")
911
912     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
913
914
915 def table_performance_trending_dashboard(table, input_data):
916     """Generate the table(s) with algorithm:
917     table_performance_trending_dashboard
918     specified in the specification file.
919
920     :param table: Table to generate.
921     :param input_data: Data to process.
922     :type table: pandas.Series
923     :type input_data: InputData
924     """
925
926     logging.info("  Generating the table {0} ...".
927                  format(table.get("title", "")))
928
929     # Transform the data
930     logging.info("    Creating the data set for the {0} '{1}'.".
931                  format(table.get("type", ""), table.get("title", "")))
932     data = input_data.filter_data(table, continue_on_error=True)
933
934     # Prepare the header of the tables
935     header = ["Test Case",
936               "Trend [Mpps]",
937               "Short-Term Change [%]",
938               "Long-Term Change [%]",
939               "Regressions [#]",
940               "Progressions [#]"
941               ]
942     header_str = ",".join(header) + "\n"
943
944     # Prepare data for the table:
945     tbl_dict = dict()
946     for job, builds in table["data"].items():
947         for build in builds:
948             for tst_name, tst_data in data[job][str(build)].iteritems():
949                 if tst_name.lower() in table.get("ignore-list", list()):
950                     continue
951                 if tbl_dict.get(tst_name, None) is None:
952                     groups = re.search(REGEX_NIC, tst_data["parent"])
953                     if not groups:
954                         continue
955                     nic = groups.group(0)
956                     tbl_dict[tst_name] = {
957                         "name": "{0}-{1}".format(nic, tst_data["name"]),
958                         "data": OrderedDict()}
959                 try:
960                     tbl_dict[tst_name]["data"][str(build)] = \
961                         tst_data["result"]["receive-rate"]
962                 except (TypeError, KeyError):
963                     pass  # No data in output.xml for this test
964
965     tbl_lst = list()
966     for tst_name in tbl_dict.keys():
967         data_t = tbl_dict[tst_name]["data"]
968         if len(data_t) < 2:
969             continue
970
971         classification_lst, avgs = classify_anomalies(data_t)
972
973         win_size = min(len(data_t), table["window"])
974         long_win_size = min(len(data_t), table["long-trend-window"])
975
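        # Short-term change compares the newest trend average with the one
        # win_size samples back; long-term change compares it with the
        # maximum trend average inside the long window but before the short
        # one (avgs[-long_win_size:-win_size]).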
976         try:
977             max_long_avg = max(
978                 [x for x in avgs[-long_win_size:-win_size]
979                  if not isnan(x)])
980         except ValueError:
981             max_long_avg = nan
982         last_avg = avgs[-1]
983         avg_week_ago = avgs[max(-win_size, -len(avgs))]
984
985         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
986             rel_change_last = nan
987         else:
988             rel_change_last = round(
989                 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
990
991         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
992             rel_change_long = nan
993         else:
994             rel_change_long = round(
995                 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
996
997         if classification_lst:
998             if isnan(rel_change_last) and isnan(rel_change_long):
999                 continue
1000             if (isnan(last_avg) or
1001                 isnan(rel_change_last) or
1002                 isnan(rel_change_long)):
1003                 continue
1004             tbl_lst.append(
1005                 [tbl_dict[tst_name]["name"],
1006                  round(last_avg / 1000000, 2),
1007                  rel_change_last,
1008                  rel_change_long,
1009                  classification_lst[-win_size:].count("regression"),
1010                  classification_lst[-win_size:].count("progression")])
1011
1012     tbl_lst.sort(key=lambda rel: rel[0])
1013
1014     tbl_sorted = list()
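    # Final ordering: tests with the most regressions first, within the same
    # regression count the most progressions first, and within those groups
    # ascending short-term change, so the worst drops end up on top.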
1015     for nrr in range(table["window"], -1, -1):
1016         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1017         for nrp in range(table["window"], -1, -1):
1018             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1019             tbl_out.sort(key=lambda rel: rel[2])
1020             tbl_sorted.extend(tbl_out)
1021
1022     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1023
1024     logging.info("    Writing file: '{0}'".format(file_name))
1025     with open(file_name, "w") as file_handler:
1026         file_handler.write(header_str)
1027         for test in tbl_sorted:
1028             file_handler.write(",".join([str(item) for item in test]) + '\n')
1029
1030     txt_file_name = "{0}.txt".format(table["output-file"])
1031     logging.info("    Writing file: '{0}'".format(txt_file_name))
1032     convert_csv_to_pretty_txt(file_name, txt_file_name)
1033
1034
1035 def _generate_url(base, testbed, test_name):
1036     """Generate URL to a trending plot from the name of the test case.
1037
1038     :param base: The base part of URL common to all test cases.
1039     :param testbed: The testbed used for testing.
1040     :param test_name: The name of the test case.
1041     :type base: str
1042     :type testbed: str
1043     :type test_name: str
1044     :returns: The URL to the plot with the trending data for the given test
1045         case.
1046     :rtype: str
1047     """
1048
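    # Illustrative example (hypothetical test name, derived from the rules
    # below): with base "../trending/", testbed "2n-skx" and a test name
    # containing "x710", "64b", "2t1c" and "l2bdbasemaclrn", the result is
    # "../trending/l2-2n-skx-x710-64b-base-scale.html#x710-64b-2t1c-base-scale".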
1049     url = base
1050     file_name = ""
1051     anchor = ".html#"
1052     feature = ""
1053
1054     if "lbdpdk" in test_name or "lbvpp" in test_name:
1055         file_name = "link_bonding"
1056
1057     elif "114b" in test_name and "vhost" in test_name:
1058         file_name = "vts"
1059
1060     elif "testpmd" in test_name or "l3fwd" in test_name:
1061         file_name = "dpdk"
1062
1063     elif "memif" in test_name:
1064         file_name = "container_memif"
1065         feature = "-base"
1066
1067     elif "srv6" in test_name:
1068         file_name = "srv6"
1069
1070     elif "vhost" in test_name:
1071         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
1072             file_name = "vm_vhost_l2"
1073             if "114b" in test_name:
1074                 feature = ""
1075             elif "l2xcbase" in test_name and "x520" in test_name:
1076                 feature = "-base-l2xc"
1077             elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
1078                 feature = "-base-l2bd"
1079             else:
1080                 feature = "-base"
1081         elif "ip4base" in test_name:
1082             file_name = "vm_vhost_ip4"
1083             feature = "-base"
1084
1085     elif "ipsecbasetnlsw" in test_name:
1086         file_name = "ipsecsw"
1087         feature = "-base-scale"
1088
1089     elif "ipsec" in test_name:
1090         file_name = "ipsec"
1091         feature = "-base-scale"
1092         if "hw-" in test_name:
1093             file_name = "ipsechw"
1094         elif "sw-" in test_name:
1095             file_name = "ipsecsw"
1096         if "-int-" in test_name:
1097             feature = "-base-scale-int"
1098         elif "tnl" in test_name:
1099             feature = "-base-scale-tnl"
1100
1101     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
1102         file_name = "ip4_tunnels"
1103         feature = "-base"
1104
1105     elif "ip4base" in test_name or "ip4scale" in test_name:
1106         file_name = "ip4"
1107         if "xl710" in test_name:
1108             feature = "-base-scale-features"
1109         elif "iacl" in test_name:
1110             feature = "-features-iacl"
1111         elif "oacl" in test_name:
1112             feature = "-features-oacl"
1113         elif "snat" in test_name or "cop" in test_name:
1114             feature = "-features"
1115         else:
1116             feature = "-base-scale"
1117
1118     elif "ip6base" in test_name or "ip6scale" in test_name:
1119         file_name = "ip6"
1120         feature = "-base-scale"
1121
1122     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
1123             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
1124             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
1125         file_name = "l2"
1126         if "macip" in test_name:
1127             feature = "-features-macip"
1128         elif "iacl" in test_name:
1129             feature = "-features-iacl"
1130         elif "oacl" in test_name:
1131             feature = "-features-oacl"
1132         else:
1133             feature = "-base-scale"
1134
1135     if "x520" in test_name:
1136         nic = "x520-"
1137     elif "x710" in test_name:
1138         nic = "x710-"
1139     elif "xl710" in test_name:
1140         nic = "xl710-"
1141     elif "xxv710" in test_name:
1142         nic = "xxv710-"
1143     elif "vic1227" in test_name:
1144         nic = "vic1227-"
1145     elif "vic1385" in test_name:
1146         nic = "vic1385-"
1147     elif "x553" in test_name:
1148         nic = "x553-"
1149     else:
1150         nic = ""
1151     anchor += nic
1152
1153     if "64b" in test_name:
1154         framesize = "64b"
1155     elif "78b" in test_name:
1156         framesize = "78b"
1157     elif "imix" in test_name:
1158         framesize = "imix"
1159     elif "9000b" in test_name:
1160         framesize = "9000b"
1161     elif "1518b" in test_name:
1162         framesize = "1518b"
1163     elif "114b" in test_name:
1164         framesize = "114b"
1165     else:
1166         framesize = ""
1167     anchor += framesize + '-'
1168
1169     if "1t1c" in test_name:
1170         anchor += "1t1c"
1171     elif "2t2c" in test_name:
1172         anchor += "2t2c"
1173     elif "4t4c" in test_name:
1174         anchor += "4t4c"
1175     elif "2t1c" in test_name:
1176         anchor += "2t1c"
1177     elif "4t2c" in test_name:
1178         anchor += "4t2c"
1179     elif "8t4c" in test_name:
1180         anchor += "8t4c"
1181
1182     return url + file_name + '-' + testbed + '-' + nic + framesize + \
1183         feature.replace("-int", "").replace("-tnl", "") + anchor + feature
1184
1185
1186 def table_performance_trending_dashboard_html(table, input_data):
1187     """Generate the table(s) with algorithm:
1188     table_performance_trending_dashboard_html specified in the specification
1189     file.
1190
1191     :param table: Table to generate.
1192     :param input_data: Data to process.
1193     :type table: dict
1194     :type input_data: InputData
1195     """
1196
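    # A minimal sketch of the specification entry this function consumes; the
    # key names match the lookups below, the values are placeholders only and
    # the authoritative layout lives in the specification YAML files:
    #
    #   - type: "table"
    #     algorithm: "table_performance_trending_dashboard_html"
    #     title: "HTML performance trending dashboard 1t1c"
    #     input-file: "<path>/dashboard-1t1c.csv"
    #     output-file: "<path>/dashboard-1t1c.rst"
    #     testbed: "3n-hsw"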
1197     testbed = table.get("testbed", None)
1198     if testbed is None:
1199         logging.error("The testbed is not defined for the table '{0}'.".
1200                       format(table.get("title", "")))
1201         return
1202
1203     logging.info("  Generating the table {0} ...".
1204                  format(table.get("title", "")))
1205
1206     try:
1207         with open(table["input-file"], 'rb') as csv_file:
1208             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1209             csv_lst = [item for item in csv_content]
1210     except KeyError:
1211         logging.warning("The input file is not defined.")
1212         return
1213     except csv.Error as err:
1214         logging.warning("Unable to process the file '{0}'.\n{1}".
1215                         format(table["input-file"], err))
1216         return
1217
1218     # Table:
1219     dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
1220
1221     # Table header:
1222     tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
1223     for idx, item in enumerate(csv_lst[0]):
1224         alignment = "left" if idx == 0 else "center"
1225         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
1226         th.text = item
1227
1228     # Rows:
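    # Each data row is coloured according to the counters expected in the
    # fifth and sixth CSV columns (row[4] and row[5], regressions and
    # progressions): any non-zero regression count wins over progressions,
    # and the two shades of every colour alternate between odd and even rows.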
1229     colors = {"regression": ("#ffcccc", "#ff9999"),
1230               "progression": ("#c6ecc6", "#9fdf9f"),
1231               "normal": ("#e9f1fb", "#d4e4f7")}
1232     for r_idx, row in enumerate(csv_lst[1:]):
1233         if int(row[4]):
1234             color = "regression"
1235         elif int(row[5]):
1236             color = "progression"
1237         else:
1238             color = "normal"
1239         background = colors[color][r_idx % 2]
1240         tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
1241
1242         # Columns:
1243         for c_idx, item in enumerate(row):
1244             alignment = "left" if c_idx == 0 else "center"
1245             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
1246             # Name:
1247             if c_idx == 0:
1248                 url = _generate_url("../trending/", testbed, item)
1249                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
1250                 ref.text = item
1251             else:
1252                 td.text = item
1253     try:
1254         with open(table["output-file"], 'w') as html_file:
1255             logging.info("    Writing file: '{0}'".format(table["output-file"]))
1256             html_file.write(".. raw:: html\n\n\t")
1257             html_file.write(ET.tostring(dashboard))
1258             html_file.write("\n\t<p><br><br></p>\n")
1259     except KeyError:
1260         logging.warning("The output file is not defined.")
1261         return
1262
1263
1264 def table_last_failed_tests(table, input_data):
1265     """Generate the table(s) with algorithm: table_last_failed_tests
1266     specified in the specification file.
1267
1268     :param table: Table to generate.
1269     :param input_data: Data to process.
1270     :type table: pandas.Series
1271     :type input_data: InputData
1272     """
1273
1274     logging.info("  Generating the table {0} ...".
1275                  format(table.get("title", "")))
1276
1277     # Transform the data
1278     logging.info("    Creating the data set for the {0} '{1}'.".
1279                  format(table.get("type", ""), table.get("title", "")))
1280     data = input_data.filter_data(table, continue_on_error=True)
1281
1282     if data is None or data.empty:
1283         logging.warning("    No data for the {0} '{1}'.".
1284                         format(table.get("type", ""), table.get("title", "")))
1285         return
1286
1287     tbl_list = list()
1288     for job, builds in table["data"].items():
1289         for build in builds:
1290             build = str(build)
1291             try:
1292                 version = input_data.metadata(job, build).get("version", "")
1293             except KeyError:
1294                 logging.error("Data for {job}: {build} is not present.".
1295                               format(job=job, build=build))
1296                 return
1297             tbl_list.append(build)
1298             tbl_list.append(version)
1299             for tst_name, tst_data in data[job][build].iteritems():
1300                 if tst_data["status"] != "FAIL":
1301                     continue
1302                 groups = re.search(REGEX_NIC, tst_data["parent"])
1303                 if not groups:
1304                     continue
1305                 nic = groups.group(0)
1306                 tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))
1307
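    # The result is a flat text file, one item per line: the build number and
    # the VPP version of each build listed in the specification, followed by
    # one "<nic>-<test name>" line for every failed test in that build.
    # Illustrative content only (hypothetical values):
    #
    #     2034
    #     19.08-rc0~622
    #     10ge2p1x520-64b-1t1c-ethip4-ip4base-mrr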
1308     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1309     logging.info("    Writing file: '{0}'".format(file_name))
1310     with open(file_name, "w") as file_handler:
1311         for test in tbl_list:
1312             file_handler.write(test + '\n')
1313
1314
1315 def table_failed_tests(table, input_data):
1316     """Generate the table(s) with algorithm: table_failed_tests
1317     specified in the specification file.
1318
1319     :param table: Table to generate.
1320     :param input_data: Data to process.
1321     :type table: pandas.Series
1322     :type input_data: InputData
1323     """
1324
1325     logging.info("  Generating the table {0} ...".
1326                  format(table.get("title", "")))
1327
1328     # Transform the data
1329     logging.info("    Creating the data set for the {0} '{1}'.".
1330                  format(table.get("type", ""), table.get("title", "")))
1331     data = input_data.filter_data(table, continue_on_error=True)
1332
1333     # Prepare the header of the tables
1334     header = ["Test Case",
1335               "Failures [#]",
1336               "Last Failure [Time]",
1337               "Last Failure [VPP-Build-Id]",
1338               "Last Failure [CSIT-Job-Build-Id]"]
1339
1340     # Generate the data for the table according to the model in the table
1341     # specification
1342
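    # Only failures from builds generated within the last "window" days
    # (7 by default) are counted; older builds are skipped in the loop below.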
1343     now = dt.utcnow()
1344     timeperiod = timedelta(int(table.get("window", 7)))
1345
1346     tbl_dict = dict()
1347     for job, builds in table["data"].items():
1348         for build in builds:
1349             build = str(build)
1350             for tst_name, tst_data in data[job][build].iteritems():
1351                 if tst_name.lower() in table.get("ignore-list", list()):
1352                     continue
1353                 if tbl_dict.get(tst_name, None) is None:
1354                     groups = re.search(REGEX_NIC, tst_data["parent"])
1355                     if not groups:
1356                         continue
1357                     nic = groups.group(0)
1358                     tbl_dict[tst_name] = {
1359                         "name": "{0}-{1}".format(nic, tst_data["name"]),
1360                         "data": OrderedDict()}
1361                 try:
1362                     generated = input_data.metadata(job, build).\
1363                         get("generated", "")
1364                     if not generated:
1365                         continue
1366                     then = dt.strptime(generated, "%Y%m%d %H:%M")
1367                     if (now - then) <= timeperiod:
1368                         tbl_dict[tst_name]["data"][build] = (
1369                             tst_data["status"],
1370                             generated,
1371                             input_data.metadata(job, build).get("version", ""),
1372                             build)
1373                 except (TypeError, KeyError) as err:
1374                     logging.warning("Cannot process data for test {0}: {1}".
1375                                     format(tst_name, repr(err)))
1376
1377     max_fails = 0
1378     tbl_lst = list()
1379     for tst_data in tbl_dict.values():
1380         fails_nr = 0
1381         for val in tst_data["data"].values():
1382             if val[0] == "FAIL":
1383                 fails_nr += 1
1384                 fails_last_date = val[1]
1385                 fails_last_vpp = val[2]
1386                 fails_last_csit = val[3]
1387         if fails_nr:
1388             max_fails = max(max_fails, fails_nr)
1389             tbl_lst.append([tst_data["name"],
1390                             fails_nr,
1391                             fails_last_date,
1392                             fails_last_vpp,
1393                             "mrr-daily-build-{0}".format(fails_last_csit)])
1394
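    # Order the rows primarily by the number of failures (descending); rows
    # with the same failure count keep the ordering by the date of the last
    # failure (most recent first) established by the sort below.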
1395     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1396     tbl_sorted = list()
1397     for nrf in range(max_fails, -1, -1):
1398         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1399         tbl_sorted.extend(tbl_fails)
1400     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1401
1402     logging.info("    Writing file: '{0}'".format(file_name))
1403     with open(file_name, "w") as file_handler:
1404         file_handler.write(",".join(header) + "\n")
1405         for test in tbl_sorted:
1406             file_handler.write(",".join([str(item) for item in test]) + '\n')
1407
1408     txt_file_name = "{0}.txt".format(table["output-file"])
1409     logging.info("    Writing file: '{0}'".format(txt_file_name))
1410     convert_csv_to_pretty_txt(file_name, txt_file_name)
1411
1412
1413 def table_failed_tests_html(table, input_data):
1414     """Generate the table(s) with algorithm: table_failed_tests_html
1415     specified in the specification file.
1416
1417     :param table: Table to generate.
1418     :param input_data: Data to process.
1419     :type table: pandas.Series
1420     :type input_data: InputData
1421     """
1422
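    # This function only renders an already generated CSV (typically the one
    # written by table_failed_tests) as an HTML table wrapped in an
    # ".. raw:: html" directive; the first column is turned into a link to
    # the corresponding trending graph.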
1423     testbed = table.get("testbed", None)
1424     if testbed is None:
1425         logging.error("The testbed is not defined for the table '{0}'.".
1426                       format(table.get("title", "")))
1427         return
1428
1429     logging.info("  Generating the table {0} ...".
1430                  format(table.get("title", "")))
1431
1432     try:
1433         with open(table["input-file"], 'rb') as csv_file:
1434             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1435             csv_lst = [item for item in csv_content]
1436     except KeyError:
1437         logging.warning("The input file is not defined.")
1438         return
1439     except csv.Error as err:
1440         logging.warning("Unable to process the file '{0}'.\n{1}".
1441                         format(table["input-file"], err))
1442         return
1443
1444     # Table:
1445     failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
1446
1447     # Table header:
1448     tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
1449     for idx, item in enumerate(csv_lst[0]):
1450         alignment = "left" if idx == 0 else "center"
1451         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
1452         th.text = item
1453
1454     # Rows:
1455     colors = ("#e9f1fb", "#d4e4f7")
1456     for r_idx, row in enumerate(csv_lst[1:]):
1457         background = colors[r_idx % 2]
1458         tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
1459
1460         # Columns:
1461         for c_idx, item in enumerate(row):
1462             alignment = "left" if c_idx == 0 else "center"
1463             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
1464             # Name:
1465             if c_idx == 0:
1466                 url = _generate_url("../trending/", testbed, item)
1467                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
1468                 ref.text = item
1469             else:
1470                 td.text = item
1471     try:
1472         with open(table["output-file"], 'w') as html_file:
1473             logging.info("    Writing file: '{0}'".format(table["output-file"]))
1474             html_file.write(".. raw:: html\n\n\t")
1475             html_file.write(ET.tostring(failed_tests))
1476             html_file.write("\n\t<p><br><br></p>\n")
1477     except KeyError:
1478         logging.warning("The output file is not defined.")
1479         return