# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Algorithms to generate tables.
"""


import logging
import csv
import re

from string import replace
from collections import OrderedDict
from numpy import nan, isnan
from xml.etree import ElementTree as ET
from datetime import datetime as dt
from datetime import timedelta

from utils import mean, stdev, relative_change, classify_anomalies, \
    convert_csv_to_pretty_txt, relative_change_stdev


REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
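# Matches the NIC substring embedded in suite/test names, e.g. "10ge2p1x520".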


def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
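        # Dispatch by name: the "algorithm" value in the specification must
        # match the name of a table-generating function in this module.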
        try:
            eval(table["algorithm"])(table, data)
        except NameError as err:
            logging.error("Algorithm '{alg}' is probably not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
    logging.info("Done.")


def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
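    # Only the first build of the first job listed in the specification is
    # used as the source of the detailed results.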
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.iteritems():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("conf-history",
                                                            "show-run"):
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")


def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
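    # Merge the results from all jobs and builds listed in the specification
    # into a single data set.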
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.iteritems():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        col_data = replace(col_data, "No Data",
                                           "Not Captured     ")
                        if column["data"].split(" ")[1] in ("conf-history",
                                                            "show-run"):
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append('"Not captured"')
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")


def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data for the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
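                # Normalise the test name: drop the test-type suffix
                # (-ndrpdr, -pdrdisc, ...) and collapse thread/core tags
                # (e.g. "2t1c" -> "1c") so that reference and compare results
                # map to the same row.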
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["ref-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                    replace("-ndrdisc", "").replace("-pdr", ""). \
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["cmp-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except KeyError:
                    pass
                except TypeError:
                    tbl_dict.pop(tst_name_mod, None)
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                            replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                            replace("-ndrdisc", "").replace("-pdr", ""). \
                            replace("-ndr", "").\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                        if "across topologies" in table["title"].lower():
                            tst_name_mod = tst_name_mod.replace("2n1l-", "")
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][item["title"
                                ]].append(tst_data["result"]["receive-rate"].
                                          avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "PDR"]["LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "NDR"]["LOWER"])
                            else:
                                continue
                        except (TypeError, KeyError):
                            pass

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
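        # Delta [%] is the relative change between the reference mean
        # (item[-4]) and the compare mean (item[-2]).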
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))


def table_performance_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison_nic
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data for the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if table["reference"]["nic"] not in tst_data["tags"]:
                    continue
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
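                # Strip the NIC part from the test name; the NIC is already
                # selected via the "nic" tag filter above, so it is not kept
                # in the row name.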
                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = "{0}".format("-".join(tst_data["name"].
                                                 split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["ref-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if table["compare"]["nic"] not in tst_data["tags"]:
                    continue
                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                    replace("-ndrdisc", "").replace("-pdr", ""). \
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = "{0}".format("-".join(tst_data["name"].
                                                 split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["cmp-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except (KeyError, TypeError):
                    pass

    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        if item["nic"] not in tst_data["tags"]:
                            continue
                        tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                            replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                            replace("-ndrdisc", "").replace("-pdr", ""). \
                            replace("-ndr", "").\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                        tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                        if "across topologies" in table["title"].lower():
                            tst_name_mod = tst_name_mod.replace("2n1l-", "")
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][item["title"
                                ]].append(tst_data["result"]["receive-rate"].
                                          avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "PDR"]["LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "NDR"]["LOWER"])
                            else:
                                continue
                        except (TypeError, KeyError):
                            pass

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        else:
            item.append(None)
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))


def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data for the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
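                # Strip the NIC part from the test name; the NIC tag is used
                # below only to decide whether a result belongs to the
                # reference or the compare column.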
                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = "-".join(tst_data["name"].split("-")[:-1])
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    if table["include-tests"] == "MRR":
                        result = tst_data["result"]["receive-rate"].avg
                    elif table["include-tests"] == "PDR":
                        result = tst_data["throughput"]["PDR"]["LOWER"]
                    elif table["include-tests"] == "NDR":
                        result = tst_data["throughput"]["NDR"]["LOWER"]
                    else:
                        result = None

                    if result:
                        if table["reference"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["ref-data"].append(result)
                        elif table["compare"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug("No data for {0}".format(tst_name))
                    logging.debug(repr(err))
                    # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))


def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            "Test case",
            "{0} Throughput [Mpps]".format(table["reference"]["title"]),
            "{0} Stdev [Mpps]".format(table["reference"]["title"]),
            "{0} Throughput [Mpps]".format(table["compare"]["title"]),
            "{0} Stdev [Mpps]".format(table["compare"]["title"]),
            "Delta [%]", "Stdev of delta [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_data["type"] == "SOAK":
                    tst_name_mod = tst_name.replace("-soak", "")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data["parent"])
                        nic = groups.group(0) if groups else ""
                        name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                              split("-")[:-1]))
                        tbl_dict[tst_name_mod] = {
                            "name": name,
                            "ref-data": list(),
                            "cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod]["cmp-data"].append(
                            tst_data["throughput"]["LOWER"])
                    except (KeyError, TypeError):
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdr", "").\
                    replace("-mrr", "")
                if tst_name_mod in tests_lst:
                    try:
                        if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
                            if table["include-tests"] == "MRR":
                                result = tst_data["result"]["receive-rate"].avg
                            elif table["include-tests"] == "PDR":
                                result = tst_data["throughput"]["PDR"]["LOWER"]
                            elif table["include-tests"] == "NDR":
                                result = tst_data["throughput"]["NDR"]["LOWER"]
                            else:
                                result = None
                            if result is not None:
                                tbl_dict[tst_name_mod]["ref-data"].append(
                                    result)
                    except (KeyError, TypeError):
                        continue

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        data_r = tbl_dict[tst_name]["ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name]["cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
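        # The delta and its standard deviation are derived from the reference
        # and compare means and standard deviations.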
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))


def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data for the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)
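        # classification_lst labels each sample (e.g. "regression" or
        # "progression"); avgs holds the corresponding trend averages.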

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

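        # Short-term change compares the last trend average with the one a
        # window ago; long-term change compares it with the maximum average
        # seen in the long-term window.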
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            if (isnan(last_avg) or
                isnan(rel_change_last) or
                isnan(rel_change_long)):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

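    # Re-order the table: tests with the most regressions first, then the
    # most progressions, then by the short-term change.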
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)


def _generate_url(base, testbed, test_name):
    """Generate URL to a trending plot from the name of the test case.

    :param base: The base part of URL common to all test cases.
    :param testbed: The testbed used for testing.
    :param test_name: The name of the test case.
    :type base: str
    :type testbed: str
    :type test_name: str
    :returns: The URL to the plot with the trending data for the given test
        case.
    :rtype: str
    """

    url = base
    file_name = ""
    anchor = ".html#"
    feature = ""

    if "lbdpdk" in test_name or "lbvpp" in test_name:
        file_name = "link_bonding"

    elif "114b" in test_name and "vhost" in test_name:
        file_name = "vts"

    elif "testpmd" in test_name or "l3fwd" in test_name:
        file_name = "dpdk"

    elif "memif" in test_name:
        file_name = "container_memif"
        feature = "-base"

    elif "srv6" in test_name:
        file_name = "srv6"

    elif "vhost" in test_name:
        if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
            file_name = "vm_vhost_l2"
            if "114b" in test_name:
                feature = ""
            elif "l2xcbase" in test_name and "x520" in test_name:
                feature = "-base-l2xc"
            elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
                feature = "-base-l2bd"
            else:
                feature = "-base"
        elif "ip4base" in test_name:
            file_name = "vm_vhost_ip4"
            feature = "-base"

    elif "ipsecbasetnlsw" in test_name:
        file_name = "ipsecsw"
        feature = "-base-scale"

    elif "ipsec" in test_name:
        file_name = "ipsec"
        feature = "-base-scale"
        if "hw-" in test_name:
            file_name = "ipsechw"
        elif "sw-" in test_name:
            file_name = "ipsecsw"
        if "-int-" in test_name:
            feature = "-base-scale-int"
        elif "tnl" in test_name:
            feature = "-base-scale-tnl"

    elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
        file_name = "ip4_tunnels"
        feature = "-base"

    elif "ip4base" in test_name or "ip4scale" in test_name:
        file_name = "ip4"
        if "xl710" in test_name:
            feature = "-base-scale-features"
        elif "iacl" in test_name:
            feature = "-features-iacl"
        elif "oacl" in test_name:
            feature = "-features-oacl"
        elif "snat" in test_name or "cop" in test_name:
            feature = "-features"
        else:
            feature = "-base-scale"

    elif "ip6base" in test_name or "ip6scale" in test_name:
        file_name = "ip6"
        feature = "-base-scale"

    elif "l2xcbase" in test_name or "l2xcscale" in test_name \
            or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
            or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
        file_name = "l2"
        if "macip" in test_name:
            feature = "-features-macip"
        elif "iacl" in test_name:
            feature = "-features-iacl"
        elif "oacl" in test_name:
            feature = "-features-oacl"
        else:
            feature = "-base-scale"

    if "x520" in test_name:
        nic = "x520-"
    elif "x710" in test_name:
        nic = "x710-"
    elif "xl710" in test_name:
        nic = "xl710-"
    elif "xxv710" in test_name:
        nic = "xxv710-"
    elif "vic1227" in test_name:
        nic = "vic1227-"
    elif "vic1385" in test_name:
        nic = "vic1385-"
    elif "x553" in test_name:
        nic = "x553-"
    else:
        nic = ""
    anchor += nic

    if "64b" in test_name:
        framesize = "64b"
    elif "78b" in test_name:
        framesize = "78b"
    elif "imix" in test_name:
        framesize = "imix"
    elif "9000b" in test_name:
        framesize = "9000b"
    elif "1518b" in test_name:
        framesize = "1518b"
    elif "114b" in test_name:
        framesize = "114b"
    else:
        framesize = ""
    anchor += framesize + '-'

    if "1t1c" in test_name:
        anchor += "1t1c"
    elif "2t2c" in test_name:
        anchor += "2t2c"
    elif "4t4c" in test_name:
        anchor += "4t4c"
    elif "2t1c" in test_name:
        anchor += "2t1c"
    elif "4t2c" in test_name:
        anchor += "4t2c"
    elif "8t4c" in test_name:
        anchor += "8t4c"

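    # The link points to <base><file>-<testbed>-<nic><framesize><feature>.html
    # with an anchor of the form <nic><framesize>-<cores><feature>; the
    # "-int"/"-tnl" suffixes are dropped from the page name but kept in the
    # anchor.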
1166     return url + file_name + '-' + testbed + '-' + nic + framesize + \
1167         feature.replace("-int", "").replace("-tnl", "") + anchor + feature
1168
1169
1170 def table_performance_trending_dashboard_html(table, input_data):
1171     """Generate the table(s) with algorithm:
1172     table_performance_trending_dashboard_html specified in the specification
1173     file.
1174
1175     :param table: Table to generate.
1176     :param input_data: Data to process.
1177     :type table: dict
1178     :type input_data: InputData
1179     """
1180
1181     testbed = table.get("testbed", None)
1182     if testbed is None:
1183         logging.error("The testbed is not defined for the table '{0}'.".
1184                       format(table.get("title", "")))
1185         return
1186
1187     logging.info("  Generating the table {0} ...".
1188                  format(table.get("title", "")))
1189
1190     try:
1191         with open(table["input-file"], 'rb') as csv_file:
1192             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1193             csv_lst = [item for item in csv_content]
1194     except KeyError:
1195         logging.warning("The input file is not defined.")
1196         return
1197     except csv.Error as err:
1198         logging.warning("Not possible to process the file '{0}'.\n{1}".
1199                         format(table["input-file"], err))
1200         return
1201
1202     # Table:
1203     dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
1204
1205     # Table header:
1206     tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
1207     for idx, item in enumerate(csv_lst[0]):
1208         alignment = "left" if idx == 0 else "center"
1209         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
1210         th.text = item
1211
1212     # Rows:
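    # Pick the row colour from the evaluation stored in the CSV: a non-zero
    # value in column index 4 is treated as a regression, a non-zero value
    # in column index 5 as a progression, anything else as "normal" (these
    # columns are assumed to hold the regression/progression counts produced
    # by the trending dashboard).  Two shades per colour alternate between
    # consecutive rows.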
1213     colors = {"regression": ("#ffcccc", "#ff9999"),
1214               "progression": ("#c6ecc6", "#9fdf9f"),
1215               "normal": ("#e9f1fb", "#d4e4f7")}
1216     for r_idx, row in enumerate(csv_lst[1:]):
1217         if int(row[4]):
1218             color = "regression"
1219         elif int(row[5]):
1220             color = "progression"
1221         else:
1222             color = "normal"
1223         background = colors[color][r_idx % 2]
1224         tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
1225
1226         # Columns:
1227         for c_idx, item in enumerate(row):
1228             alignment = "left" if c_idx == 0 else "center"
1229             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
1230             # Name:
1231             if c_idx == 0:
1232                 url = _generate_url("../trending/", testbed, item)
1233                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
1234                 ref.text = item
1235             else:
1236                 td.text = item
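    # A minimal sketch of what gets written (rows depend on the input CSV):
    #
    #   .. raw:: html
    #
    #       <table width="100%" border="0"><tr bgcolor="#7eade7">...</tr>...</table>
    #       <p><br><br></p>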
1237     try:
1238         with open(table["output-file"], 'w') as html_file:
1239             logging.info("    Writing file: '{0}'".format(table["output-file"]))
1240             html_file.write(".. raw:: html\n\n\t")
1241             html_file.write(ET.tostring(dashboard))
1242             html_file.write("\n\t<p><br><br></p>\n")
1243     except KeyError:
1244         logging.warning("The output file is not defined.")
1245         return
1246
1247
1248 def table_last_failed_tests(table, input_data):
1249     """Generate the table(s) with algorithm: table_last_failed_tests
1250     specified in the specification file.
1251
1252     :param table: Table to generate.
1253     :param input_data: Data to process.
1254     :type table: pandas.Series
1255     :type input_data: InputData
1256     """
1257
1258     logging.info("  Generating the table {0} ...".
1259                  format(table.get("title", "")))
1260
1261     # Transform the data
1262     logging.info("    Creating the data set for the {0} '{1}'.".
1263                  format(table.get("type", ""), table.get("title", "")))
1264     data = input_data.filter_data(table, continue_on_error=True)
1265
1266     if data is None or data.empty:
1267         logging.warning("    No data for the {0} '{1}'.".
1268                         format(table.get("type", ""), table.get("title", "")))
1269         return
1270
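    # The output is a flat list written one item per line: for every build
    # its number and VPP version, followed by one "<nic>-<test name>" entry
    # per failed test found in that build.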
1271     tbl_list = list()
1272     for job, builds in table["data"].items():
1273         for build in builds:
1274             build = str(build)
1275             try:
1276                 version = input_data.metadata(job, build).get("version", "")
1277             except KeyError:
1278                 logging.error("Data for {job}: {build} is not present.".
1279                               format(job=job, build=build))
1280                 return
1281             tbl_list.append(build)
1282             tbl_list.append(version)
1283             for tst_name, tst_data in data[job][build].iteritems():
1284                 if tst_data["status"] != "FAIL":
1285                     continue
1286                 groups = re.search(REGEX_NIC, tst_data["parent"])
1287                 if not groups:
1288                     continue
1289                 nic = groups.group(0)
1290                 tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))
1291
1292     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1293     logging.info("    Writing file: '{0}'".format(file_name))
1294     with open(file_name, "w") as file_handler:
1295         for test in tbl_list:
1296             file_handler.write(test + '\n')
1297
1298
1299 def table_failed_tests(table, input_data):
1300     """Generate the table(s) with algorithm: table_failed_tests
1301     specified in the specification file.
1302
1303     :param table: Table to generate.
1304     :param input_data: Data to process.
1305     :type table: pandas.Series
1306     :type input_data: InputData
1307     """
1308
1309     logging.info("  Generating the table {0} ...".
1310                  format(table.get("title", "")))
1311
1312     # Transform the data
1313     logging.info("    Creating the data set for the {0} '{1}'.".
1314                  format(table.get("type", ""), table.get("title", "")))
1315     data = input_data.filter_data(table, continue_on_error=True)
1316
1317     # Prepare the header of the tables
1318     header = ["Test Case",
1319               "Failures [#]",
1320               "Last Failure [Time]",
1321               "Last Failure [VPP-Build-Id]",
1322               "Last Failure [CSIT-Job-Build-Id]"]
1323
1324     # Generate the data for the table according to the model in the table
1325     # specification
1326
1327     now = dt.utcnow()
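    # timedelta() interprets its first positional argument as a number of
    # days, so only builds generated within the last "window" days
    # (7 by default) are taken into account below.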
1328     timeperiod = timedelta(int(table.get("window", 7)))
1329
1330     tbl_dict = dict()
1331     for job, builds in table["data"].items():
1332         for build in builds:
1333             build = str(build)
1334             for tst_name, tst_data in data[job][build].iteritems():
1335                 if tst_name.lower() in table.get("ignore-list", list()):
1336                     continue
1337                 if tbl_dict.get(tst_name, None) is None:
1338                     groups = re.search(REGEX_NIC, tst_data["parent"])
1339                     if not groups:
1340                         continue
1341                     nic = groups.group(0)
1342                     tbl_dict[tst_name] = {
1343                         "name": "{0}-{1}".format(nic, tst_data["name"]),
1344                         "data": OrderedDict()}
1345                 try:
1346                     generated = input_data.metadata(job, build).\
1347                         get("generated", "")
1348                     if not generated:
1349                         continue
1350                     then = dt.strptime(generated, "%Y%m%d %H:%M")
1351                     if (now - then) <= timeperiod:
1352                         tbl_dict[tst_name]["data"][build] = (
1353                             tst_data["status"],
1354                             generated,
1355                             input_data.metadata(job, build).get("version", ""),
1356                             build)
1357                 except (TypeError, KeyError) as err:
1358                     logging.warning("tst_name: {} - err: {}".
1359                                     format(tst_name, repr(err)))
1360
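    # Count the failures recorded for each test and remember the date, VPP
    # version and CSIT build of the last failing run seen; the builds are
    # assumed to be listed in chronological order, so this is the most
    # recent failure.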
1361     max_fails = 0
1362     tbl_lst = list()
1363     for tst_data in tbl_dict.values():
1364         fails_nr = 0
1365         for val in tst_data["data"].values():
1366             if val[0] == "FAIL":
1367                 fails_nr += 1
1368                 fails_last_date = val[1]
1369                 fails_last_vpp = val[2]
1370                 fails_last_csit = val[3]
1371         if fails_nr:
1372             max_fails = max(max_fails, fails_nr)
1373             tbl_lst.append([tst_data["name"],
1374                             fails_nr,
1375                             fails_last_date,
1376                             fails_last_vpp,
1377                             "mrr-daily-build-{0}".format(fails_last_csit)])
1378
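    # Sort by the date of the last failure (newest first), then group the
    # rows by the number of failures in descending order; the date ordering
    # is preserved within each group.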
1379     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1380     tbl_sorted = list()
1381     for nrf in range(max_fails, -1, -1):
1382         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1383         tbl_sorted.extend(tbl_fails)
1384     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1385
1386     logging.info("    Writing file: '{0}'".format(file_name))
1387     with open(file_name, "w") as file_handler:
1388         file_handler.write(",".join(header) + "\n")
1389         for test in tbl_sorted:
1390             file_handler.write(",".join([str(item) for item in test]) + '\n')
1391
1392     txt_file_name = "{0}.txt".format(table["output-file"])
1393     logging.info("    Writing file: '{0}'".format(txt_file_name))
1394     convert_csv_to_pretty_txt(file_name, txt_file_name)
1395
1396
1397 def table_failed_tests_html(table, input_data):
1398     """Generate the table(s) with algorithm: table_failed_tests_html
1399     specified in the specification file.
1400
1401     :param table: Table to generate.
1402     :param input_data: Data to process.
1403     :type table: pandas.Series
1404     :type input_data: InputData
1405     """
1406
1407     testbed = table.get("testbed", None)
1408     if testbed is None:
1409         logging.error("The testbed is not defined for the table '{0}'.".
1410                       format(table.get("title", "")))
1411         return
1412
1413     logging.info("  Generating the table {0} ...".
1414                  format(table.get("title", "")))
1415
1416     try:
1417         with open(table["input-file"], 'rb') as csv_file:
1418             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1419             csv_lst = [item for item in csv_content]
1420     except KeyError:
1421         logging.warning("The input file is not defined.")
1422         return
1423     except csv.Error as err:
1424         logging.warning("Unable to process the file '{0}'.\n{1}".
1425                         format(table["input-file"], err))
1426         return
1427
1428     # Table:
1429     failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
1430
1431     # Table header:
1432     tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
1433     for idx, item in enumerate(csv_lst[0]):
1434         alignment = "left" if idx == 0 else "center"
1435         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
1436         th.text = item
1437
1438     # Rows:
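    # Row backgrounds alternate between two shades; the first cell of every
    # row is turned into a link to the corresponding trending page built by
    # _generate_url().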
1439     colors = ("#e9f1fb", "#d4e4f7")
1440     for r_idx, row in enumerate(csv_lst[1:]):
1441         background = colors[r_idx % 2]
1442         tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
1443
1444         # Columns:
1445         for c_idx, item in enumerate(row):
1446             alignment = "left" if c_idx == 0 else "center"
1447             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
1448             # Name:
1449             if c_idx == 0:
1450                 url = _generate_url("../trending/", testbed, item)
1451                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
1452                 ref.text = item
1453             else:
1454                 td.text = item
1455     try:
1456         with open(table["output-file"], 'w') as html_file:
1457             logging.info("    Writing file: '{0}'".format(table["output-file"]))
1458             html_file.write(".. raw:: html\n\n\t")
1459             html_file.write(ET.tostring(failed_tests))
1460             html_file.write("\n\t<p><br><br></p>\n")
1461     except KeyError:
1462         logging.warning("The output file is not defined.")
1463         return