Report: Tables
Source: resources/tools/presentation/generator_tables.py (csit.git)
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
28
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30     convert_csv_to_pretty_txt, relative_change_stdev
31
32
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
34
35
36 def generate_tables(spec, data):
37     """Generate all tables specified in the specification file.
38
39     :param spec: Specification read from the specification file.
40     :param data: Data to process.
41     :type spec: Specification
42     :type data: InputData
43     """
44
45     logging.info("Generating the tables ...")
46     for table in spec.tables:
47         try:
48             eval(table["algorithm"])(table, data)
49         except NameError as err:
50             logging.error("The algorithm '{alg}' is probably not defined: {err}".
51                           format(alg=table["algorithm"], err=repr(err)))
52     logging.info("Done.")
53
54
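A minimal sketch of the dispatch performed by generate_tables() above: the
"algorithm" string of each table entry in the specification is resolved by
eval() to a module-level function of the same name and called with the table
entry and the input data. The keys shown here are illustrative, not the full
specification schema.

    def table_example(table, data):
        print("generating '{0}'".format(table.get("title", "")))

    spec_tables = [{"algorithm": "table_example", "title": "Example table"}]
    for table in spec_tables:
        eval(table["algorithm"])(table, None)
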
55 def table_details(table, input_data):
56     """Generate the table(s) with algorithm: table_detailed_test_results
57     specified in the specification file.
58
59     :param table: Table to generate.
60     :param input_data: Data to process.
61     :type table: pandas.Series
62     :type input_data: InputData
63     """
64
65     logging.info("  Generating the table {0} ...".
66                  format(table.get("title", "")))
67
68     # Transform the data
69     logging.info("    Creating the data set for the {0} '{1}'.".
70                  format(table.get("type", ""), table.get("title", "")))
71     data = input_data.filter_data(table)
72
73     # Prepare the header of the tables
74     header = list()
75     for column in table["columns"]:
76         header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
77
78     # Generate the data for the table according to the model in the table
79     # specification
80     job = table["data"].keys()[0]
81     build = str(table["data"][job][0])
82     try:
83         suites = input_data.suites(job, build)
84     except KeyError:
85         logging.error("    No data available. The table will not be generated.")
86         return
87
88     for suite_longname, suite in suites.iteritems():
89         # Generate data
90         suite_name = suite["name"]
91         table_lst = list()
92         for test in data[job][build].keys():
93             if data[job][build][test]["parent"] in suite_name:
94                 row_lst = list()
95                 for column in table["columns"]:
96                     try:
97                         col_data = str(data[job][build][test][column["data"].
98                                        split(" ")[1]]).replace('"', '""')
99                         if column["data"].split(" ")[1] in ("conf-history",
100                                                             "show-run"):
101                             col_data = replace(col_data, " |br| ", "",
102                                                maxreplace=1)
103                             col_data = " |prein| {0} |preout| ".\
104                                 format(col_data[:-5])
105                         row_lst.append('"{0}"'.format(col_data))
106                     except KeyError:
107                         row_lst.append("No data")
108                 table_lst.append(row_lst)
109
110         # Write the data to file
111         if table_lst:
112             file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
113                                             table["output-file-ext"])
114             logging.info("      Writing file: '{}'".format(file_name))
115             with open(file_name, "w") as file_handler:
116                 file_handler.write(",".join(header) + "\n")
117                 for item in table_lst:
118                     file_handler.write(",".join(item) + "\n")
119
120     logging.info("  Done.")
121
122
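The column definitions consumed above are assumed to have the shape sketched
below; only the second word of the "data" field matters here, as it is the key
looked up in each test's data (the exact strings used in the real
specification files may differ).

    column = {"title": "Name", "data": "data name"}
    data_key = column["data"].split(" ")[1]
    assert data_key == "name"  # the rendered value is str(test_data["name"])
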
123 def table_merged_details(table, input_data):
124     """Generate the table(s) with algorithm: table_merged_details
125     specified in the specification file.
126
127     :param table: Table to generate.
128     :param input_data: Data to process.
129     :type table: pandas.Series
130     :type input_data: InputData
131     """
132
133     logging.info("  Generating the table {0} ...".
134                  format(table.get("title", "")))
135
136     # Transform the data
137     logging.info("    Creating the data set for the {0} '{1}'.".
138                  format(table.get("type", ""), table.get("title", "")))
139     data = input_data.filter_data(table)
140     data = input_data.merge_data(data)
141     data.sort_index(inplace=True)
142
143     logging.info("    Creating the data set for the {0} '{1}'.".
144                  format(table.get("type", ""), table.get("title", "")))
145     suites = input_data.filter_data(table, data_set="suites")
146     suites = input_data.merge_data(suites)
147
148     # Prepare the header of the tables
149     header = list()
150     for column in table["columns"]:
151         header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
152
153     for _, suite in suites.iteritems():
154         # Generate data
155         suite_name = suite["name"]
156         table_lst = list()
157         for test in data.keys():
158             if data[test]["parent"] in suite_name:
159                 row_lst = list()
160                 for column in table["columns"]:
161                     try:
162                         col_data = str(data[test][column["data"].
163                                        split(" ")[1]]).replace('"', '""')
164                         col_data = replace(col_data, "No Data",
165                                            "Not Captured     ")
166                         if column["data"].split(" ")[1] in ("conf-history",
167                                                             "show-run"):
168                             col_data = replace(col_data, " |br| ", "",
169                                                maxreplace=1)
170                             col_data = " |prein| {0} |preout| ".\
171                                 format(col_data[:-5])
172                         row_lst.append('"{0}"'.format(col_data))
173                     except KeyError:
174                         row_lst.append('"Not captured"')
175                 table_lst.append(row_lst)
176
177         # Write the data to file
178         if table_lst:
179             file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
180                                             table["output-file-ext"])
181             logging.info("      Writing file: '{}'".format(file_name))
182             with open(file_name, "w") as file_handler:
183                 file_handler.write(",".join(header) + "\n")
184                 for item in table_lst:
185                     file_handler.write(",".join(item) + "\n")
186
187     logging.info("  Done.")
188
189
190 def table_performance_comparison(table, input_data):
191     """Generate the table(s) with algorithm: table_performance_comparison
192     specified in the specification file.
193
194     :param table: Table to generate.
195     :param input_data: Data to process.
196     :type table: pandas.Series
197     :type input_data: InputData
198     """
199
200     logging.info("  Generating the table {0} ...".
201                  format(table.get("title", "")))
202
203     # Transform the data
204     logging.info("    Creating the data set for the {0} '{1}'.".
205                  format(table.get("type", ""), table.get("title", "")))
206     data = input_data.filter_data(table, continue_on_error=True)
207
208     # Prepare the header of the tables
209     try:
210         header = ["Test case", ]
211
212         if table["include-tests"] == "MRR":
213             hdr_param = "Receive Rate"
214         else:
215             hdr_param = "Throughput"
216
217         history = table.get("history", None)
218         if history:
219             for item in history:
220                 header.extend(
221                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
222                      "{0} Stdev [Mpps]".format(item["title"])])
223         header.extend(
224             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
225              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
226              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
227              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
228              "Delta [%]"])
229         header_str = ",".join(header) + "\n"
230     except (AttributeError, KeyError) as err:
231         logging.error("The model is invalid, missing parameter: {0}".
232                       format(err))
233         return
234
235     # Prepare data for the table:
236     tbl_dict = dict()
237     for job, builds in table["reference"]["data"].items():
238         for build in builds:
239             for tst_name, tst_data in data[job][str(build)].iteritems():
240                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
241                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
242                     replace("-ndrdisc", "").replace("-pdr", "").\
243                     replace("-ndr", "").\
244                     replace("1t1c", "1c").replace("2t1c", "1c").\
245                     replace("2t2c", "2c").replace("4t2c", "2c").\
246                     replace("4t4c", "4c").replace("8t4c", "4c")
247                 if "across topologies" in table["title"].lower():
248                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
249                 if tbl_dict.get(tst_name_mod, None) is None:
250                     groups = re.search(REGEX_NIC, tst_data["parent"])
251                     nic = groups.group(0) if groups else ""
252                     name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
253                                                           split("-")[:-1]))
254                     if "across testbeds" in table["title"].lower() or \
255                             "across topologies" in table["title"].lower():
256                         name = name.\
257                             replace("1t1c", "1c").replace("2t1c", "1c").\
258                             replace("2t2c", "2c").replace("4t2c", "2c").\
259                             replace("4t4c", "4c").replace("8t4c", "4c")
260                     tbl_dict[tst_name_mod] = {"name": name,
261                                               "ref-data": list(),
262                                               "cmp-data": list()}
263                 try:
264                     # TODO: Re-work when NDRPDRDISC tests are not used
265                     if table["include-tests"] == "MRR":
266                         tbl_dict[tst_name_mod]["ref-data"]. \
267                             append(tst_data["result"]["receive-rate"].avg)
268                     elif table["include-tests"] == "PDR":
269                         if tst_data["type"] == "PDR":
270                             tbl_dict[tst_name_mod]["ref-data"]. \
271                                 append(tst_data["throughput"]["value"])
272                         elif tst_data["type"] == "NDRPDR":
273                             tbl_dict[tst_name_mod]["ref-data"].append(
274                                 tst_data["throughput"]["PDR"]["LOWER"])
275                     elif table["include-tests"] == "NDR":
276                         if tst_data["type"] == "NDR":
277                             tbl_dict[tst_name_mod]["ref-data"]. \
278                                 append(tst_data["throughput"]["value"])
279                         elif tst_data["type"] == "NDRPDR":
280                             tbl_dict[tst_name_mod]["ref-data"].append(
281                                 tst_data["throughput"]["NDR"]["LOWER"])
282                     else:
283                         continue
284                 except TypeError:
285                     pass  # No data in output.xml for this test
286
287     for job, builds in table["compare"]["data"].items():
288         for build in builds:
289             for tst_name, tst_data in data[job][str(build)].iteritems():
290                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
291                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
292                     replace("-ndrdisc", "").replace("-pdr", ""). \
293                     replace("-ndr", "").\
294                     replace("1t1c", "1c").replace("2t1c", "1c").\
295                     replace("2t2c", "2c").replace("4t2c", "2c").\
296                     replace("4t4c", "4c").replace("8t4c", "4c")
297                 if "across topologies" in table["title"].lower():
298                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
299                 try:
300                     # TODO: Re-work when NDRPDRDISC tests are not used
301                     if table["include-tests"] == "MRR":
302                         tbl_dict[tst_name_mod]["cmp-data"]. \
303                             append(tst_data["result"]["receive-rate"].avg)
304                     elif table["include-tests"] == "PDR":
305                         if tst_data["type"] == "PDR":
306                             tbl_dict[tst_name_mod]["cmp-data"]. \
307                                 append(tst_data["throughput"]["value"])
308                         elif tst_data["type"] == "NDRPDR":
309                             tbl_dict[tst_name_mod]["cmp-data"].append(
310                                 tst_data["throughput"]["PDR"]["LOWER"])
311                     elif table["include-tests"] == "NDR":
312                         if tst_data["type"] == "NDR":
313                             tbl_dict[tst_name_mod]["cmp-data"]. \
314                                 append(tst_data["throughput"]["value"])
315                         elif tst_data["type"] == "NDRPDR":
316                             tbl_dict[tst_name_mod]["cmp-data"].append(
317                                 tst_data["throughput"]["NDR"]["LOWER"])
318                     else:
319                         continue
320                 except KeyError:
321                     pass
322                 except TypeError:
323                     tbl_dict.pop(tst_name_mod, None)
324     if history:
325         for item in history:
326             for job, builds in item["data"].items():
327                 for build in builds:
328                     for tst_name, tst_data in data[job][str(build)].iteritems():
329                         tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
330                             replace("-ndrpdr", "").replace("-pdrdisc", ""). \
331                             replace("-ndrdisc", "").replace("-pdr", ""). \
332                             replace("-ndr", "").\
333                             replace("1t1c", "1c").replace("2t1c", "1c").\
334                             replace("2t2c", "2c").replace("4t2c", "2c").\
335                             replace("4t4c", "4c").replace("8t4c", "4c")
336                         if "across topologies" in table["title"].lower():
337                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
338                         if tbl_dict.get(tst_name_mod, None) is None:
339                             continue
340                         if tbl_dict[tst_name_mod].get("history", None) is None:
341                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
342                         if tbl_dict[tst_name_mod]["history"].get(item["title"],
343                                                              None) is None:
344                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
345                                 list()
346                         try:
347                             # TODO: Re-work when NDRPDRDISC tests are not used
348                             if table["include-tests"] == "MRR":
349                                 tbl_dict[tst_name_mod]["history"][item["title"
350                                 ]].append(tst_data["result"]["receive-rate"].
351                                           avg)
352                             elif table["include-tests"] == "PDR":
353                                 if tst_data["type"] == "PDR":
354                                     tbl_dict[tst_name_mod]["history"][
355                                         item["title"]].\
356                                         append(tst_data["throughput"]["value"])
357                                 elif tst_data["type"] == "NDRPDR":
358                                     tbl_dict[tst_name_mod]["history"][item[
359                                         "title"]].append(tst_data["throughput"][
360                                         "PDR"]["LOWER"])
361                             elif table["include-tests"] == "NDR":
362                                 if tst_data["type"] == "NDR":
363                                     tbl_dict[tst_name_mod]["history"][
364                                         item["title"]].\
365                                         append(tst_data["throughput"]["value"])
366                                 elif tst_data["type"] == "NDRPDR":
367                                     tbl_dict[tst_name_mod]["history"][item[
368                                         "title"]].append(tst_data["throughput"][
369                                         "NDR"]["LOWER"])
370                             else:
371                                 continue
372                         except (TypeError, KeyError):
373                             pass
374
375     tbl_lst = list()
376     for tst_name in tbl_dict.keys():
377         item = [tbl_dict[tst_name]["name"], ]
378         if history:
379             if tbl_dict[tst_name].get("history", None) is not None:
380                 for hist_data in tbl_dict[tst_name]["history"].values():
381                     if hist_data:
382                         item.append(round(mean(hist_data) / 1000000, 2))
383                         item.append(round(stdev(hist_data) / 1000000, 2))
384                     else:
385                         item.extend([None, None])
386             else:
387                 item.extend([None, None])
388         data_t = tbl_dict[tst_name]["ref-data"]
389         if data_t:
390             item.append(round(mean(data_t) / 1000000, 2))
391             item.append(round(stdev(data_t) / 1000000, 2))
392         else:
393             item.extend([None, None])
394         data_t = tbl_dict[tst_name]["cmp-data"]
395         if data_t:
396             item.append(round(mean(data_t) / 1000000, 2))
397             item.append(round(stdev(data_t) / 1000000, 2))
398         else:
399             item.extend([None, None])
400         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
401             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
402         if len(item) == len(header):
403             tbl_lst.append(item)
404
405     # Sort the table according to the relative change
406     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
407
408     # Generate csv tables:
409     csv_file = "{0}.csv".format(table["output-file"])
410     with open(csv_file, "w") as file_handler:
411         file_handler.write(header_str)
412         for test in tbl_lst:
413             file_handler.write(",".join([str(item) for item in test]) + "\n")
414
415     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
416
417
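The long replace() chains in table_performance_comparison() normalise test
names so that reference and compare builds key into the same tbl_dict entry:
the test-type suffix is stripped and thread/core tags are reduced to the core
count. A small example, showing only the two substitutions that fire for this
name:

    name = "64b-4t2c-ethip4-ip4base-ndrpdr"
    name_mod = name.replace("-ndrpdr", "").replace("4t2c", "2c")
    assert name_mod == "64b-2c-ethip4-ip4base"
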
418 def table_performance_comparison_nic(table, input_data):
419     """Generate the table(s) with algorithm: table_performance_comparison
420     specified in the specification file.
421
422     :param table: Table to generate.
423     :param input_data: Data to process.
424     :type table: pandas.Series
425     :type input_data: InputData
426     """
427
428     logging.info("  Generating the table {0} ...".
429                  format(table.get("title", "")))
430
431     # Transform the data
432     logging.info("    Creating the data set for the {0} '{1}'.".
433                  format(table.get("type", ""), table.get("title", "")))
434     data = input_data.filter_data(table, continue_on_error=True)
435
436     # Prepare the header of the tables
437     try:
438         header = ["Test case", ]
439
440         if table["include-tests"] == "MRR":
441             hdr_param = "Receive Rate"
442         else:
443             hdr_param = "Throughput"
444
445         history = table.get("history", None)
446         if history:
447             for item in history:
448                 header.extend(
449                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
450                      "{0} Stdev [Mpps]".format(item["title"])])
451         header.extend(
452             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
453              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
454              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
455              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
456              "Delta [%]"])
457         header_str = ",".join(header) + "\n"
458     except (AttributeError, KeyError) as err:
459         logging.error("The model is invalid, missing parameter: {0}".
460                       format(err))
461         return
462
463     # Prepare data for the table:
464     tbl_dict = dict()
465     for job, builds in table["reference"]["data"].items():
466         for build in builds:
467             for tst_name, tst_data in data[job][str(build)].iteritems():
468                 if table["reference"]["nic"] not in tst_data["tags"]:
469                     continue
470                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
471                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
472                     replace("-ndrdisc", "").replace("-pdr", "").\
473                     replace("-ndr", "").\
474                     replace("1t1c", "1c").replace("2t1c", "1c").\
475                     replace("2t2c", "2c").replace("4t2c", "2c").\
476                     replace("4t4c", "4c").replace("8t4c", "4c")
477                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
478                 if "across topologies" in table["title"].lower():
479                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
480                 if tbl_dict.get(tst_name_mod, None) is None:
481                     name = "{0}".format("-".join(tst_data["name"].
482                                                  split("-")[:-1]))
483                     if "across testbeds" in table["title"].lower() or \
484                             "across topologies" in table["title"].lower():
485                         name = name.\
486                             replace("1t1c", "1c").replace("2t1c", "1c").\
487                             replace("2t2c", "2c").replace("4t2c", "2c").\
488                             replace("4t4c", "4c").replace("8t4c", "4c")
489                     tbl_dict[tst_name_mod] = {"name": name,
490                                               "ref-data": list(),
491                                               "cmp-data": list()}
492                 try:
493                     # TODO: Re-work when NDRPDRDISC tests are not used
494                     if table["include-tests"] == "MRR":
495                         tbl_dict[tst_name_mod]["ref-data"]. \
496                             append(tst_data["result"]["receive-rate"].avg)
497                     elif table["include-tests"] == "PDR":
498                         if tst_data["type"] == "PDR":
499                             tbl_dict[tst_name_mod]["ref-data"]. \
500                                 append(tst_data["throughput"]["value"])
501                         elif tst_data["type"] == "NDRPDR":
502                             tbl_dict[tst_name_mod]["ref-data"].append(
503                                 tst_data["throughput"]["PDR"]["LOWER"])
504                     elif table["include-tests"] == "NDR":
505                         if tst_data["type"] == "NDR":
506                             tbl_dict[tst_name_mod]["ref-data"]. \
507                                 append(tst_data["throughput"]["value"])
508                         elif tst_data["type"] == "NDRPDR":
509                             tbl_dict[tst_name_mod]["ref-data"].append(
510                                 tst_data["throughput"]["NDR"]["LOWER"])
511                     else:
512                         continue
513                 except TypeError:
514                     pass  # No data in output.xml for this test
515
516     for job, builds in table["compare"]["data"].items():
517         for build in builds:
518             for tst_name, tst_data in data[job][str(build)].iteritems():
519                 if table["compare"]["nic"] not in tst_data["tags"]:
520                     continue
521                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
522                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
523                     replace("-ndrdisc", "").replace("-pdr", ""). \
524                     replace("-ndr", "").\
525                     replace("1t1c", "1c").replace("2t1c", "1c").\
526                     replace("2t2c", "2c").replace("4t2c", "2c").\
527                     replace("4t4c", "4c").replace("8t4c", "4c")
528                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
529                 if "across topologies" in table["title"].lower():
530                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
531                 try:
532                     # TODO: Re-work when NDRPDRDISC tests are not used
533                     if table["include-tests"] == "MRR":
534                         tbl_dict[tst_name_mod]["cmp-data"]. \
535                             append(tst_data["result"]["receive-rate"].avg)
536                     elif table["include-tests"] == "PDR":
537                         if tst_data["type"] == "PDR":
538                             tbl_dict[tst_name_mod]["cmp-data"]. \
539                                 append(tst_data["throughput"]["value"])
540                         elif tst_data["type"] == "NDRPDR":
541                             tbl_dict[tst_name_mod]["cmp-data"].append(
542                                 tst_data["throughput"]["PDR"]["LOWER"])
543                     elif table["include-tests"] == "NDR":
544                         if tst_data["type"] == "NDR":
545                             tbl_dict[tst_name_mod]["cmp-data"]. \
546                                 append(tst_data["throughput"]["value"])
547                         elif tst_data["type"] == "NDRPDR":
548                             tbl_dict[tst_name_mod]["cmp-data"].append(
549                                 tst_data["throughput"]["NDR"]["LOWER"])
550                     else:
551                         continue
552                 except KeyError:
553                     pass
554                 except TypeError:
555                     tbl_dict.pop(tst_name_mod, None)
556     if history:
557         for item in history:
558             for job, builds in item["data"].items():
559                 for build in builds:
560                     for tst_name, tst_data in data[job][str(build)].iteritems():
561                         if item["nic"] not in tst_data["tags"]:
562                             continue
563                         tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
564                             replace("-ndrpdr", "").replace("-pdrdisc", ""). \
565                             replace("-ndrdisc", "").replace("-pdr", ""). \
566                             replace("-ndr", "").\
567                             replace("1t1c", "1c").replace("2t1c", "1c").\
568                             replace("2t2c", "2c").replace("4t2c", "2c").\
569                             replace("4t4c", "4c").replace("8t4c", "4c")
570                         tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
571                         if "across topologies" in table["title"].lower():
572                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
573                         if tbl_dict.get(tst_name_mod, None) is None:
574                             continue
575                         if tbl_dict[tst_name_mod].get("history", None) is None:
576                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
577                         if tbl_dict[tst_name_mod]["history"].get(item["title"],
578                                                              None) is None:
579                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
580                                 list()
581                         try:
582                             # TODO: Re-work when NDRPDRDISC tests are not used
583                             if table["include-tests"] == "MRR":
584                                 tbl_dict[tst_name_mod]["history"][item["title"
585                                 ]].append(tst_data["result"]["receive-rate"].
586                                           avg)
587                             elif table["include-tests"] == "PDR":
588                                 if tst_data["type"] == "PDR":
589                                     tbl_dict[tst_name_mod]["history"][
590                                         item["title"]].\
591                                         append(tst_data["throughput"]["value"])
592                                 elif tst_data["type"] == "NDRPDR":
593                                     tbl_dict[tst_name_mod]["history"][item[
594                                         "title"]].append(tst_data["throughput"][
595                                         "PDR"]["LOWER"])
596                             elif table["include-tests"] == "NDR":
597                                 if tst_data["type"] == "NDR":
598                                     tbl_dict[tst_name_mod]["history"][
599                                         item["title"]].\
600                                         append(tst_data["throughput"]["value"])
601                                 elif tst_data["type"] == "NDRPDR":
602                                     tbl_dict[tst_name_mod]["history"][item[
603                                         "title"]].append(tst_data["throughput"][
604                                         "NDR"]["LOWER"])
605                             else:
606                                 continue
607                         except (TypeError, KeyError):
608                             pass
609
610     tbl_lst = list()
611     for tst_name in tbl_dict.keys():
612         item = [tbl_dict[tst_name]["name"], ]
613         if history:
614             if tbl_dict[tst_name].get("history", None) is not None:
615                 for hist_data in tbl_dict[tst_name]["history"].values():
616                     if hist_data:
617                         item.append(round(mean(hist_data) / 1000000, 2))
618                         item.append(round(stdev(hist_data) / 1000000, 2))
619                     else:
620                         item.extend([None, None])
621             else:
622                 item.extend([None, None])
623         data_t = tbl_dict[tst_name]["ref-data"]
624         if data_t:
625             item.append(round(mean(data_t) / 1000000, 2))
626             item.append(round(stdev(data_t) / 1000000, 2))
627         else:
628             item.extend([None, None])
629         data_t = tbl_dict[tst_name]["cmp-data"]
630         if data_t:
631             item.append(round(mean(data_t) / 1000000, 2))
632             item.append(round(stdev(data_t) / 1000000, 2))
633         else:
634             item.extend([None, None])
635         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
636             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
637         if len(item) == len(header):
638             tbl_lst.append(item)
639
640     # Sort the table according to the relative change
641     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
642
643     # Generate csv tables:
644     csv_file = "{0}.csv".format(table["output-file"])
645     with open(csv_file, "w") as file_handler:
646         file_handler.write(header_str)
647         for test in tbl_lst:
648             file_handler.write(",".join([str(item) for item in test]) + "\n")
649
650     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
651
652
653 def table_nics_comparison(table, input_data):
654     """Generate the table(s) with algorithm: table_nics_comparison
655     specified in the specification file.
656
657     :param table: Table to generate.
658     :param input_data: Data to process.
659     :type table: pandas.Series
660     :type input_data: InputData
661     """
662
663     logging.info("  Generating the table {0} ...".
664                  format(table.get("title", "")))
665
666     # Transform the data
667     logging.info("    Creating the data set for the {0} '{1}'.".
668                  format(table.get("type", ""), table.get("title", "")))
669     data = input_data.filter_data(table, continue_on_error=True)
670
671     # Prepare the header of the tables
672     try:
673         header = ["Test case", ]
674
675         if table["include-tests"] == "MRR":
676             hdr_param = "Receive Rate"
677         else:
678             hdr_param = "Throughput"
679
680         header.extend(
681             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
682              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
683              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
684              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
685              "Delta [%]"])
686         header_str = ",".join(header) + "\n"
687     except (AttributeError, KeyError) as err:
688         logging.error("The model is invalid, missing parameter: {0}".
689                       format(err))
690         return
691
692     # Prepare data for the table:
693     tbl_dict = dict()
694     for job, builds in table["data"].items():
695         for build in builds:
696             for tst_name, tst_data in data[job][str(build)].iteritems():
697                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
698                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
699                     replace("-ndrdisc", "").replace("-pdr", "").\
700                     replace("-ndr", "").\
701                     replace("1t1c", "1c").replace("2t1c", "1c").\
702                     replace("2t2c", "2c").replace("4t2c", "2c").\
703                     replace("4t4c", "4c").replace("8t4c", "4c")
704                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
705                 if tbl_dict.get(tst_name_mod, None) is None:
706                     name = "-".join(tst_data["name"].split("-")[:-1])
707                     tbl_dict[tst_name_mod] = {"name": name,
708                                               "ref-data": list(),
709                                               "cmp-data": list()}
710                 try:
711                     if table["include-tests"] == "MRR":
712                         result = tst_data["result"]["receive-rate"].avg
713                     elif table["include-tests"] == "PDR":
714                         result = tst_data["throughput"]["PDR"]["LOWER"]
715                     elif table["include-tests"] == "NDR":
716                         result = tst_data["throughput"]["NDR"]["LOWER"]
717                     else:
718                         result = None
719
720                     if result:
721                         if table["reference"]["nic"] in tst_data["tags"]:
722                             tbl_dict[tst_name_mod]["ref-data"].append(result)
723                         elif table["compare"]["nic"] in tst_data["tags"]:
724                             tbl_dict[tst_name_mod]["cmp-data"].append(result)
725                 except (TypeError, KeyError) as err:
726                     logging.debug("No data for {0}".format(tst_name))
727                     logging.debug(repr(err))
728                     # No data in output.xml for this test
729
730     tbl_lst = list()
731     for tst_name in tbl_dict.keys():
732         item = [tbl_dict[tst_name]["name"], ]
733         data_t = tbl_dict[tst_name]["ref-data"]
734         if data_t:
735             item.append(round(mean(data_t) / 1000000, 2))
736             item.append(round(stdev(data_t) / 1000000, 2))
737         else:
738             item.extend([None, None])
739         data_t = tbl_dict[tst_name]["cmp-data"]
740         if data_t:
741             item.append(round(mean(data_t) / 1000000, 2))
742             item.append(round(stdev(data_t) / 1000000, 2))
743         else:
744             item.extend([None, None])
745         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
746             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
747         if len(item) == len(header):
748             tbl_lst.append(item)
749
750     # Sort the table according to the relative change
751     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
752
753     # Generate csv tables:
754     csv_file = "{0}.csv".format(table["output-file"])
755     with open(csv_file, "w") as file_handler:
756         file_handler.write(header_str)
757         for test in tbl_lst:
758             file_handler.write(",".join([str(item) for item in test]) + "\n")
759
760     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
761
762
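Sketch of how the "Delta [%]" column is produced from the reference mean
(item[-4]) and the compare mean (item[-2]), both already rounded to Mpps. The
helper below only mirrors what utils.relative_change is assumed to compute,
the percentage change of the compare value against the reference value.

    def relative_change_sketch(nr1, nr2):
        return float(nr2 - nr1) / float(nr1) * 100

    item = ["example-test", 12.5, 0.2, 13.1, 0.3]
    if item[-4] is not None and item[-2] is not None and item[-4] != 0:
        item.append(int(relative_change_sketch(float(item[-4]),
                                               float(item[-2]))))
    assert item[-1] == 4  # 13.1 vs 12.5 Mpps is ~4.8 %, truncated by int()
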
763 def table_soak_vs_ndr(table, input_data):
764     """Generate the table(s) with algorithm: table_soak_vs_ndr
765     specified in the specification file.
766
767     :param table: Table to generate.
768     :param input_data: Data to process.
769     :type table: pandas.Series
770     :type input_data: InputData
771     """
772
773     logging.info("  Generating the table {0} ...".
774                  format(table.get("title", "")))
775
776     # Transform the data
777     logging.info("    Creating the data set for the {0} '{1}'.".
778                  format(table.get("type", ""), table.get("title", "")))
779     data = input_data.filter_data(table, continue_on_error=True)
780
781     # Prepare the header of the table
782     try:
783         header = [
784             "Test case",
785             "{0} Throughput [Mpps]".format(table["reference"]["title"]),
786             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
787             "{0} Throughput [Mpps]".format(table["compare"]["title"]),
788             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
789             "Delta [%]", "Stdev of delta [%]"]
790         header_str = ",".join(header) + "\n"
791     except (AttributeError, KeyError) as err:
792         logging.error("The model is invalid, missing parameter: {0}".
793                       format(err))
794         return
795
796     # Create a list of available SOAK test results:
797     tbl_dict = dict()
798     for job, builds in table["compare"]["data"].items():
799         for build in builds:
800             for tst_name, tst_data in data[job][str(build)].iteritems():
801                 if tst_data["type"] == "SOAK":
802                     tst_name_mod = tst_name.replace("-soak", "")
803                     if tbl_dict.get(tst_name_mod, None) is None:
804                         groups = re.search(REGEX_NIC, tst_data["parent"])
805                         nic = groups.group(0) if groups else ""
806                         name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
807                                                               split("-")[:-1]))
808                         tbl_dict[tst_name_mod] = {
809                             "name": name,
810                             "ref-data": list(),
811                             "cmp-data": list()
812                         }
813                     try:
814                         tbl_dict[tst_name_mod]["cmp-data"].append(
815                             tst_data["throughput"]["LOWER"])
816                     except (KeyError, TypeError):
817                         pass
818     tests_lst = tbl_dict.keys()
819
820     # Add corresponding NDR test results:
821     for job, builds in table["reference"]["data"].items():
822         for build in builds:
823             for tst_name, tst_data in data[job][str(build)].iteritems():
824                 tst_name_mod = tst_name.replace("-ndrpdr", "").\
825                     replace("-mrr", "")
826                 if tst_name_mod in tests_lst:
827                     try:
828                         if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
829                             if table["include-tests"] == "MRR":
830                                 result = tst_data["result"]["receive-rate"].avg
831                             elif table["include-tests"] == "PDR":
832                                 result = tst_data["throughput"]["PDR"]["LOWER"]
833                             elif table["include-tests"] == "NDR":
834                                 result = tst_data["throughput"]["NDR"]["LOWER"]
835                             else:
836                                 result = None
837                             if result is not None:
838                                 tbl_dict[tst_name_mod]["ref-data"].append(
839                                     result)
840                     except (KeyError, TypeError):
841                         continue
842
843     tbl_lst = list()
844     for tst_name in tbl_dict.keys():
845         item = [tbl_dict[tst_name]["name"], ]
846         data_r = tbl_dict[tst_name]["ref-data"]
847         if data_r:
848             data_r_mean = mean(data_r)
849             item.append(round(data_r_mean / 1000000, 2))
850             data_r_stdev = stdev(data_r)
851             item.append(round(data_r_stdev / 1000000, 2))
852         else:
853             data_r_mean = None
854             data_r_stdev = None
855             item.extend([None, None])
856         data_c = tbl_dict[tst_name]["cmp-data"]
857         if data_c:
858             data_c_mean = mean(data_c)
859             item.append(round(data_c_mean / 1000000, 2))
860             data_c_stdev = stdev(data_c)
861             item.append(round(data_c_stdev / 1000000, 2))
862         else:
863             data_c_mean = None
864             data_c_stdev = None
865             item.extend([None, None])
866         if data_r_mean and data_c_mean:
867             delta, d_stdev = relative_change_stdev(
868                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
869             item.append(round(delta, 2))
870             item.append(round(d_stdev, 2))
871             tbl_lst.append(item)
872
873     # Sort the table according to the relative change
874     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
875
876     # Generate csv tables:
877     csv_file = "{0}.csv".format(table["output-file"])
878     with open(csv_file, "w") as file_handler:
879         file_handler.write(header_str)
880         for test in tbl_lst:
881             file_handler.write(",".join([str(item) for item in test]) + "\n")
882
883     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
884
885
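The pairing of SOAK and NDRPDR results above relies only on suffix stripping:
a SOAK test keyed without its "-soak" suffix matches the corresponding
reference test once "-ndrpdr" (or "-mrr") is removed, for example:

    soak_name = "64b-2c-ethip4-ip4base-soak"
    ndr_name = "64b-2c-ethip4-ip4base-ndrpdr"
    key = soak_name.replace("-soak", "")
    assert key == ndr_name.replace("-ndrpdr", "").replace("-mrr", "")
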
886 def table_performance_trending_dashboard(table, input_data):
887     """Generate the table(s) with algorithm:
888     table_performance_trending_dashboard
889     specified in the specification file.
890
891     :param table: Table to generate.
892     :param input_data: Data to process.
893     :type table: pandas.Series
894     :type input_data: InputData
895     """
896
897     logging.info("  Generating the table {0} ...".
898                  format(table.get("title", "")))
899
900     # Transform the data
901     logging.info("    Creating the data set for the {0} '{1}'.".
902                  format(table.get("type", ""), table.get("title", "")))
903     data = input_data.filter_data(table, continue_on_error=True)
904
905     # Prepare the header of the tables
906     header = ["Test Case",
907               "Trend [Mpps]",
908               "Short-Term Change [%]",
909               "Long-Term Change [%]",
910               "Regressions [#]",
911               "Progressions [#]"
912               ]
913     header_str = ",".join(header) + "\n"
914
915     # Prepare data for the table:
916     tbl_dict = dict()
917     for job, builds in table["data"].items():
918         for build in builds:
919             for tst_name, tst_data in data[job][str(build)].iteritems():
920                 if tst_name.lower() in table.get("ignore-list", list()):
921                     continue
922                 if tbl_dict.get(tst_name, None) is None:
923                     groups = re.search(REGEX_NIC, tst_data["parent"])
924                     if not groups:
925                         continue
926                     nic = groups.group(0)
927                     tbl_dict[tst_name] = {
928                         "name": "{0}-{1}".format(nic, tst_data["name"]),
929                         "data": OrderedDict()}
930                 try:
931                     tbl_dict[tst_name]["data"][str(build)] = \
932                         tst_data["result"]["receive-rate"]
933                 except (TypeError, KeyError):
934                     pass  # No data in output.xml for this test
935
936     tbl_lst = list()
937     for tst_name in tbl_dict.keys():
938         data_t = tbl_dict[tst_name]["data"]
939         if len(data_t) < 2:
940             continue
941
942         classification_lst, avgs = classify_anomalies(data_t)
943
944         win_size = min(len(data_t), table["window"])
945         long_win_size = min(len(data_t), table["long-trend-window"])
946
947         try:
948             max_long_avg = max(
949                 [x for x in avgs[-long_win_size:-win_size]
950                  if not isnan(x)])
951         except ValueError:
952             max_long_avg = nan
953         last_avg = avgs[-1]
954         avg_week_ago = avgs[max(-win_size, -len(avgs))]
955
956         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
957             rel_change_last = nan
958         else:
959             rel_change_last = round(
960                 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
961
962         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
963             rel_change_long = nan
964         else:
965             rel_change_long = round(
966                 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
967
968         if classification_lst:
969             if isnan(rel_change_last) and isnan(rel_change_long):
970                 continue
971             if (isnan(last_avg) or
972                 isnan(rel_change_last) or
973                 isnan(rel_change_long)):
974                 continue
975             tbl_lst.append(
976                 [tbl_dict[tst_name]["name"],
977                  round(last_avg / 1000000, 2),
978                  rel_change_last,
979                  rel_change_long,
980                  classification_lst[-win_size:].count("regression"),
981                  classification_lst[-win_size:].count("progression")])
982
983     tbl_lst.sort(key=lambda rel: rel[0])
984
985     tbl_sorted = list()
986     for nrr in range(table["window"], -1, -1):
987         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
988         for nrp in range(table["window"], -1, -1):
989             tbl_out = [item for item in tbl_reg if item[5] == nrp]
990             tbl_out.sort(key=lambda rel: rel[2])
991             tbl_sorted.extend(tbl_out)
992
993     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
994
995     logging.info("    Writing file: '{0}'".format(file_name))
996     with open(file_name, "w") as file_handler:
997         file_handler.write(header_str)
998         for test in tbl_sorted:
999             file_handler.write(",".join([str(item) for item in test]) + '\n')
1000
1001     txt_file_name = "{0}.txt".format(table["output-file"])
1002     logging.info("    Writing file: '{0}'".format(txt_file_name))
1003     convert_csv_to_pretty_txt(file_name, txt_file_name)
1004
1005
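Worked example of the two change columns computed above from the trend
averages returned by classify_anomalies(): with a trend of 10.0 Mpps one
window ago, a current trend of 9.0 Mpps and a long-term maximum of 12.0 Mpps,
the dashboard reports a -10.0 % short-term and a -25.0 % long-term change.
The rows are then ordered by regression count, progression count and
short-term change.

    last_avg, avg_week_ago, max_long_avg = 9.0e6, 10.0e6, 12.0e6
    rel_change_last = round(((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
    rel_change_long = round(((last_avg - max_long_avg) / max_long_avg) * 100, 2)
    assert (rel_change_last, rel_change_long) == (-10.0, -25.0)
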
1006 def _generate_url(base, testbed, test_name):
1007     """Generate URL to a trending plot from the name of the test case.
1008
1009     :param base: The base part of URL common to all test cases.
1010     :param testbed: The testbed used for testing.
1011     :param test_name: The name of the test case.
1012     :type base: str
1013     :type testbed: str
1014     :type test_name: str
1015     :returns: The URL to the plot with the trending data for the given test
1016         case.
1017     :rtype: str
1018     """
1019
1020     url = base
1021     file_name = ""
1022     anchor = ".html#"
1023     feature = ""
1024
1025     if "lbdpdk" in test_name or "lbvpp" in test_name:
1026         file_name = "link_bonding"
1027
1028     elif "114b" in test_name and "vhost" in test_name:
1029         file_name = "vts"
1030
1031     elif "testpmd" in test_name or "l3fwd" in test_name:
1032         file_name = "dpdk"
1033
1034     elif "memif" in test_name:
1035         file_name = "container_memif"
1036         feature = "-base"
1037
1038     elif "srv6" in test_name:
1039         file_name = "srv6"
1040
1041     elif "vhost" in test_name:
1042         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
1043             file_name = "vm_vhost_l2"
1044             if "114b" in test_name:
1045                 feature = ""
1046             elif "l2xcbase" in test_name and "x520" in test_name:
1047                 feature = "-base-l2xc"
1048             elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
1049                 feature = "-base-l2bd"
1050             else:
1051                 feature = "-base"
1052         elif "ip4base" in test_name:
1053             file_name = "vm_vhost_ip4"
1054             feature = "-base"
1055
1056     elif "ipsecbasetnlsw" in test_name:
1057         file_name = "ipsecsw"
1058         feature = "-base-scale"
1059
1060     elif "ipsec" in test_name:
1061         file_name = "ipsec"
1062         feature = "-base-scale"
1063         if "hw-" in test_name:
1064             file_name = "ipsechw"
1065         elif "sw-" in test_name:
1066             file_name = "ipsecsw"
1067         if "-int-" in test_name:
1068             feature = "-base-scale-int"
1069         elif "tnl" in test_name:
1070             feature = "-base-scale-tnl"
1071
1072     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
1073         file_name = "ip4_tunnels"
1074         feature = "-base"
1075
1076     elif "ip4base" in test_name or "ip4scale" in test_name:
1077         file_name = "ip4"
1078         if "xl710" in test_name:
1079             feature = "-base-scale-features"
1080         elif "iacl" in test_name:
1081             feature = "-features-iacl"
1082         elif "oacl" in test_name:
1083             feature = "-features-oacl"
1084         elif "snat" in test_name or "cop" in test_name:
1085             feature = "-features"
1086         else:
1087             feature = "-base-scale"
1088
1089     elif "ip6base" in test_name or "ip6scale" in test_name:
1090         file_name = "ip6"
1091         feature = "-base-scale"
1092
1093     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
1094             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
1095             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
1096         file_name = "l2"
1097         if "macip" in test_name:
1098             feature = "-features-macip"
1099         elif "iacl" in test_name:
1100             feature = "-features-iacl"
1101         elif "oacl" in test_name:
1102             feature = "-features-oacl"
1103         else:
1104             feature = "-base-scale"
1105
1106     if "x520" in test_name:
1107         nic = "x520-"
1108     elif "x710" in test_name:
1109         nic = "x710-"
1110     elif "xl710" in test_name:
1111         nic = "xl710-"
1112     elif "xxv710" in test_name:
1113         nic = "xxv710-"
1114     elif "vic1227" in test_name:
1115         nic = "vic1227-"
1116     elif "vic1385" in test_name:
1117         nic = "vic1385-"
1118     elif "x553" in test_name:
1119         nic = "x553-"
1120     else:
1121         nic = ""
1122     anchor += nic
1123
1124     if "64b" in test_name:
1125         framesize = "64b"
1126     elif "78b" in test_name:
1127         framesize = "78b"
1128     elif "imix" in test_name:
1129         framesize = "imix"
1130     elif "9000b" in test_name:
1131         framesize = "9000b"
1132     elif "1518b" in test_name:
1133         framesize = "1518b"
1134     elif "114b" in test_name:
1135         framesize = "114b"
1136     else:
1137         framesize = ""
1138     anchor += framesize + '-'
1139
1140     if "1t1c" in test_name:
1141         anchor += "1t1c"
1142     elif "2t2c" in test_name:
1143         anchor += "2t2c"
1144     elif "4t4c" in test_name:
1145         anchor += "4t4c"
1146     elif "2t1c" in test_name:
1147         anchor += "2t1c"
1148     elif "4t2c" in test_name:
1149         anchor += "4t2c"
1150     elif "8t4c" in test_name:
1151         anchor += "8t4c"
1152
1153     return url + file_name + '-' + testbed + '-' + nic + framesize + \
1154         feature.replace("-int", "").replace("-tnl", "") + anchor + feature
1155
1156
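An illustrative call of _generate_url(); the testbed string and the test name
are assumed values, chosen so that exactly one branch fires in each of the
classification blocks above:

    url = _generate_url("../trending/", "2n-skx",
                        "10ge2p1x520-64b-2t1c-eth-l2xcbase-mrr")
    # -> "../trending/l2-2n-skx-x520-64b-base-scale.html#x520-64b-2t1c-base-scale"
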
1157 def table_performance_trending_dashboard_html(table, input_data):
1158     """Generate the table(s) with algorithm:
1159     table_performance_trending_dashboard_html specified in the specification
1160     file.
1161
1162     :param table: Table to generate.
1163     :param input_data: Data to process.
1164     :type table: dict
1165     :type input_data: InputData
1166     """
1167
1168     testbed = table.get("testbed", None)
1169     if testbed is None:
1170         logging.error("The testbed is not defined for the table '{0}'.".
1171                       format(table.get("title", "")))
1172         return
1173
1174     logging.info("  Generating the table {0} ...".
1175                  format(table.get("title", "")))
1176
1177     try:
1178         with open(table["input-file"], 'rb') as csv_file:
1179             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1180             csv_lst = [item for item in csv_content]
1181     except KeyError:
1182         logging.warning("The input file is not defined.")
1183         return
1184     except csv.Error as err:
1185         logging.warning("Unable to process the file '{0}'.\n{1}".
1186                         format(table["input-file"], err))
1187         return
1188
1189     # Table:
1190     dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
1191
1192     # Table header:
1193     tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
1194     for idx, item in enumerate(csv_lst[0]):
1195         alignment = "left" if idx == 0 else "center"
1196         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
1197         th.text = item
1198
1199     # Rows:
1200     colors = {"regression": ("#ffcccc", "#ff9999"),
1201               "progression": ("#c6ecc6", "#9fdf9f"),
1202               "normal": ("#e9f1fb", "#d4e4f7")}
1203     for r_idx, row in enumerate(csv_lst[1:]):
1204         if int(row[4]):
1205             color = "regression"
1206         elif int(row[5]):
1207             color = "progression"
1208         else:
1209             color = "normal"
1210         background = colors[color][r_idx % 2]
1211         tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
1212
1213         # Columns:
1214         for c_idx, item in enumerate(row):
1215             alignment = "left" if c_idx == 0 else "center"
1216             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
1217             # Name:
1218             if c_idx == 0:
1219                 url = _generate_url("../trending/", testbed, item)
1220                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
1221                 ref.text = item
1222             else:
1223                 td.text = item
1224     try:
1225         with open(table["output-file"], 'w') as html_file:
1226             logging.info("    Writing file: '{0}'".format(table["output-file"]))
1227             html_file.write(".. raw:: html\n\n\t")
1228             html_file.write(ET.tostring(dashboard))
1229             html_file.write("\n\t<p><br><br></p>\n")
1230     except KeyError:
1231         logging.warning("The output file is not defined.")
1232         return
1233
1234
1235 def table_last_failed_tests(table, input_data):
1236     """Generate the table(s) with algorithm: table_last_failed_tests
1237     specified in the specification file.
1238
1239     :param table: Table to generate.
1240     :param input_data: Data to process.
1241     :type table: pandas.Series
1242     :type input_data: InputData
1243     """
1244
1245     logging.info("  Generating the table {0} ...".
1246                  format(table.get("title", "")))
1247
1248     # Transform the data
1249     logging.info("    Creating the data set for the {0} '{1}'.".
1250                  format(table.get("type", ""), table.get("title", "")))
1251     data = input_data.filter_data(table, continue_on_error=True)
1252
1253     if data is None or data.empty:
1254         logging.warning("    No data for the {0} '{1}'.".
1255                         format(table.get("type", ""), table.get("title", "")))
1256         return
1257
1258     tbl_list = list()
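         # The output is a flat text list: for every build, the build number
         # and the VPP version are written first, followed by one
         # "<nic>-<test name>" line for each test that failed in that build.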
1259     for job, builds in table["data"].items():
1260         for build in builds:
1261             build = str(build)
1262             try:
1263                 version = input_data.metadata(job, build).get("version", "")
1264             except KeyError:
1265                 logging.error("Data for {job}: {build} is not present.".
1266                               format(job=job, build=build))
1267                 return
1268             tbl_list.append(build)
1269             tbl_list.append(version)
1270             for tst_name, tst_data in data[job][build].iteritems():
1271                 if tst_data["status"] != "FAIL":
1272                     continue
1273                 groups = re.search(REGEX_NIC, tst_data["parent"])
1274                 if not groups:
1275                     continue
1276                 nic = groups.group(0)
1277                 tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))
1278
1279     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1280     logging.info("    Writing file: '{0}'".format(file_name))
1281     with open(file_name, "w") as file_handler:
1282         for test in tbl_list:
1283             file_handler.write(test + '\n')
1284
1285
1286 def table_failed_tests(table, input_data):
1287     """Generate the table(s) with algorithm: table_failed_tests
1288     specified in the specification file.
1289
1290     :param table: Table to generate.
1291     :param input_data: Data to process.
1292     :type table: pandas.Series
1293     :type input_data: InputData
1294     """
1295
1296     logging.info("  Generating the table {0} ...".
1297                  format(table.get("title", "")))
1298
1299     # Transform the data
1300     logging.info("    Creating the data set for the {0} '{1}'.".
1301                  format(table.get("type", ""), table.get("title", "")))
1302     data = input_data.filter_data(table, continue_on_error=True)
1303
1304     # Prepare the header of the tables
1305     header = ["Test Case",
1306               "Failures [#]",
1307               "Last Failure [Time]",
1308               "Last Failure [VPP-Build-Id]",
1309               "Last Failure [CSIT-Job-Build-Id]"]
1310
1311     # Generate the data for the table according to the model in the table
1312     # specification
1313
1314     now = dt.utcnow()
1315     timeperiod = timedelta(days=int(table.get("window", 7)))
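         # Only builds whose "generated" timestamp lies within the configured
         # time window (the "window" item, in days, 7 by default) contribute
         # to the failure counts collected below.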
1316
1317     tbl_dict = dict()
1318     for job, builds in table["data"].items():
1319         for build in builds:
1320             build = str(build)
1321             for tst_name, tst_data in data[job][build].iteritems():
1322                 if tst_name.lower() in table.get("ignore-list", list()):
1323                     continue
1324                 if tbl_dict.get(tst_name, None) is None:
1325                     groups = re.search(REGEX_NIC, tst_data["parent"])
1326                     if not groups:
1327                         continue
1328                     nic = groups.group(0)
1329                     tbl_dict[tst_name] = {
1330                         "name": "{0}-{1}".format(nic, tst_data["name"]),
1331                         "data": OrderedDict()}
1332                 try:
1333                     generated = input_data.metadata(job, build).\
1334                         get("generated", "")
1335                     if not generated:
1336                         continue
1337                     then = dt.strptime(generated, "%Y%m%d %H:%M")
1338                     if (now - then) <= timeperiod:
1339                         tbl_dict[tst_name]["data"][build] = (
1340                             tst_data["status"],
1341                             generated,
1342                             input_data.metadata(job, build).get("version", ""),
1343                             build)
1344                 except (TypeError, KeyError) as err:
1345                     logging.warning("Cannot process test '{0}': {1}".
1346                                     format(tst_name, repr(err)))
1347
1348     max_fails = 0
1349     tbl_lst = list()
1350     for tst_data in tbl_dict.values():
1351         fails_nr = 0
1352         for val in tst_data["data"].values():
1353             if val[0] == "FAIL":
1354                 fails_nr += 1
1355                 fails_last_date = val[1]
1356                 fails_last_vpp = val[2]
1357                 fails_last_csit = val[3]
1358         if fails_nr:
1359             max_fails = max(max_fails, fails_nr)
1360             tbl_lst.append([tst_data["name"],
1361                             fails_nr,
1362                             fails_last_date,
1363                             fails_last_vpp,
1364                             "mrr-daily-build-{0}".format(fails_last_csit)])
1365
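         # Order the rows by the date of the last failure (newest first), then
         # group them by the number of failures in descending order; the date
         # ordering is preserved inside each group.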
1366     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1367     tbl_sorted = list()
1368     for nrf in range(max_fails, -1, -1):
1369         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1370         tbl_sorted.extend(tbl_fails)
1371     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1372
1373     logging.info("    Writing file: '{0}'".format(file_name))
1374     with open(file_name, "w") as file_handler:
1375         file_handler.write(",".join(header) + "\n")
1376         for test in tbl_sorted:
1377             file_handler.write(",".join([str(item) for item in test]) + '\n')
1378
1379     txt_file_name = "{0}.txt".format(table["output-file"])
1380     logging.info("    Writing file: '{0}'".format(txt_file_name))
1381     convert_csv_to_pretty_txt(file_name, txt_file_name)
1382
1383
1384 def table_failed_tests_html(table, input_data):
1385     """Generate the table(s) with algorithm: table_failed_tests_html
1386     specified in the specification file.
1387
1388     :param table: Table to generate.
1389     :param input_data: Data to process.
1390     :type table: pandas.Series
1391     :type input_data: InputData
1392     """
1393
1394     testbed = table.get("testbed", None)
1395     if testbed is None:
1396         logging.error("The testbed is not defined for the table '{0}'.".
1397                       format(table.get("title", "")))
1398         return
1399
1400     logging.info("  Generating the table {0} ...".
1401                  format(table.get("title", "")))
1402
1403     try:
1404         with open(table["input-file"], 'rb') as csv_file:
1405             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1406             csv_lst = [item for item in csv_content]
1407     except KeyError:
1408         logging.warning("The input file is not defined.")
1409         return
1410     except csv.Error as err:
1411         logging.warning("Unable to process the file '{0}'.\n{1}".
1412                         format(table["input-file"], err))
1413         return
1414
1415     # Table:
1416     failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
1417
1418     # Table header:
1419     tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
1420     for idx, item in enumerate(csv_lst[0]):
1421         alignment = "left" if idx == 0 else "center"
1422         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
1423         th.text = item
1424
1425     # Rows:
1426     colors = ("#e9f1fb", "#d4e4f7")
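         # Rows simply alternate between two shades; unlike the trending
         # dashboard table above, no anomaly-based colour coding is applied.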
1427     for r_idx, row in enumerate(csv_lst[1:]):
1428         background = colors[r_idx % 2]
1429         tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
1430
1431         # Columns:
1432         for c_idx, item in enumerate(row):
1433             alignment = "left" if c_idx == 0 else "center"
1434             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
1435             # Name:
1436             if c_idx == 0:
1437                 url = _generate_url("../trending/", testbed, item)
1438                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
1439                 ref.text = item
1440             else:
1441                 td.text = item
1442     try:
1443         with open(table["output-file"], 'w') as html_file:
1444             logging.info("    Writing file: '{0}'".format(table["output-file"]))
1445             html_file.write(".. raw:: html\n\n\t")
1446             html_file.write(ET.tostring(failed_tests))
1447             html_file.write("\n\t<p><br><br></p>\n")
1448     except KeyError:
1449         logging.warning("The output file is not defined.")
1450         return