03fcf40ded4b9f4b9f99d7e2550ecb6661d30d99
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
28
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30     convert_csv_to_pretty_txt, relative_change_stdev
31
32
# Matches a NIC substring embedded in test/suite names, e.g. "10ge2p1x520"
# (digits, "ge", digit, "p", digit, non-digits, digits).
REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
34
35
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        try:
            # NOTE(review): eval() dispatches to the generator function named
            # in the specification; the spec file is assumed to be trusted
            # input -- do not feed it untrusted data.
            generator = eval(table["algorithm"])
            generator(table, data)
        except NameError as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
    logging.info("Done.")
53
54
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Header: each column title is csv-quoted (embedded '"' doubled).
    header = ['"{0}"'.format(str(column["title"]).replace('"', '""'))
              for column in table["columns"]]

    # Only the first build of the first job serves as the data source.
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.iteritems():
        suite_name = suite["name"]
        rows = list()
        for test in data[job][build].keys():
            # Keep only tests whose parent belongs to this suite.
            if data[job][build][test]["parent"] not in suite_name:
                continue
            row = list()
            for column in table["columns"]:
                try:
                    field = column["data"].split(" ")[1]
                    value = str(data[job][build][test][field]).\
                        replace('"', '""')
                    if field in ("conf-history", "show-run"):
                        # Drop the first " |br| " and wrap in pre-format
                        # markers (the trailing 5 chars are cut off).
                        value = replace(value, " |br| ", "", maxreplace=1)
                        value = " |prein| {0} |preout| ".format(value[:-5])
                    row.append('"{0}"'.format(value))
                except KeyError:
                    row.append("No data")
            rows.append(row)

        # Write the data to file
        if rows:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for row in rows:
                    file_handler.write(",".join(row) + "\n")

    logging.info("  Done.")
121
122
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Header: each column title is csv-quoted (embedded '"' doubled).
    header = ['"{0}"'.format(str(column["title"]).replace('"', '""'))
              for column in table["columns"]]

    for _, suite in suites.iteritems():
        suite_name = suite["name"]
        rows = list()
        for test in data.keys():
            # Keep only tests whose parent belongs to this suite.
            if data[test]["parent"] not in suite_name:
                continue
            row = list()
            for column in table["columns"]:
                try:
                    field = column["data"].split(" ")[1]
                    value = str(data[test][field]).replace('"', '""')
                    value = replace(value, "No Data", "Not Captured     ")
                    if field in ("conf-history", "show-run"):
                        # Drop the first " |br| " and wrap in pre-format
                        # markers (the trailing 5 chars are cut off).
                        value = replace(value, " |br| ", "", maxreplace=1)
                        value = " |prein| {0} |preout| ".format(value[:-5])
                    row.append('"{0}"'.format(value))
                except KeyError:
                    row.append('"Not captured"')
            rows.append(row)

        # Write the data to file
        if rows:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for row in rows:
                    file_handler.write(",".join(row) + "\n")

    logging.info("  Done.")
188
189
def _tpc_modify_test_name(test_name):
    """Normalize a test name for cross-build matching: strip the rate-search
    suffix, collapse thread/core tags to the core count only and remove the
    NIC substring.
    """
    for suffix in ("-ndrpdrdisc", "-ndrpdr", "-pdrdisc", "-ndrdisc",
                   "-pdr", "-ndr"):
        test_name = test_name.replace(suffix, "")
    for threads, cores in (("1t1c", "1c"), ("2t1c", "1c"),
                           ("2t2c", "2c"), ("4t2c", "2c"),
                           ("4t4c", "4c"), ("8t4c", "4c")):
        test_name = test_name.replace(threads, cores)
    return re.sub(REGEX_NIC, "", test_name)
200
201
202 def _tpc_modify_displayed_test_name(test_name):
203     return test_name.replace("1t1c", "1c").replace("2t1c", "1c"). \
204         replace("2t2c", "2c").replace("4t2c", "2c"). \
205         replace("4t4c", "4c").replace("8t4c", "4c")
206
207
208 def _tpc_insert_data(target, src, include_tests):
209     try:
210         if include_tests == "MRR":
211             target.append(src["result"]["receive-rate"].avg)
212         elif include_tests == "PDR":
213             target.append(src["throughput"]["PDR"]["LOWER"])
214         elif include_tests == "NDR":
215             target.append(src["throughput"]["NDR"]["LOWER"])
216     except (KeyError, TypeError):
217         pass
218
219
220 def _tpc_sort_table(table):
221     # Sort the table:
222     # 1. New in CSIT-XXXX
223     # 2. See footnote
224     # 3. Delta
225     tbl_new = list()
226     tbl_see = list()
227     tbl_delta = list()
228     for item in table:
229         if isinstance(item[-1], str):
230             if "New in CSIT" in item[-1]:
231                 tbl_new.append(item)
232             elif "See footnote" in item[-1]:
233                 tbl_see.append(item)
234         else:
235             tbl_delta.append(item)
236
237     # Sort the tables:
238     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
239     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
240     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
241     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
242
243     # Put the tables together:
244     table = list()
245     table.extend(tbl_new)
246     table.extend(tbl_see)
247     table.extend(tbl_delta)
248
249     return table
250
251
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Compares the "reference" data set against the "compare" data set
    (optionally preceded by historical data sets) and appends the relative
    change [%] of the mean throughput / receive rate.  Writes a csv table
    and a pretty txt table, with an optional dot1q footnote for 2n-skx.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Rec Rate"
        else:
            hdr_param = "Thput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    # "topo" is consulted when classifying rows below; initialize it so an
    # empty reference data set cannot cause a NameError.
    topo = ""
    for job, builds in table["reference"]["data"].items():
        topo = "2n-skx" if "2n-skx" in job else ""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name: NIC prefix + test name without the
                    # trailing (rate-search) component.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                _tpc_insert_data(target=tbl_dict[tst_name_mod]["ref-data"],
                                 src=tst_data,
                                 include_tests=table["include-tests"])

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                # Bug fix: measurements from the "compare" data set belong in
                # "cmp-data".  They were previously appended to "ref-data",
                # which left the compare columns empty and polluted the
                # reference mean/stdev (cf. table_performance_comparison_nic).
                _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
                                 src=tst_data,
                                 include_tests=table["include-tests"])

    # Optional replacement data set for the compare column.
    replacement = table["compare"].get("data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].iteritems():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if "across topologies" in table["title"].lower():
                        tst_name_mod = tst_name_mod.replace("2n1l-", "")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = "{0}".format("-".join(tst_data["name"].
                                                     split("-")[:-1]))
                        if "across testbeds" in table["title"].lower() or \
                                "across topologies" in table["title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {"name": name,
                                                  "ref-data": list(),
                                                  "cmp-data": list()}
                    if create_new_list:
                        # NOTE(review): only the first replaced test gets its
                        # previously collected compare data dropped; kept
                        # as-is to preserve behavior -- confirm intent.
                        create_new_list = False
                        tbl_dict[tst_name_mod]["cmp-data"] = list()

                    _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
                                     src=tst_data,
                                     include_tests=table["include-tests"])

    # Collect historical data points for the optional history columns.
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        tst_name_mod = _tpc_modify_test_name(tst_name)
                        if "across topologies" in table["title"].lower():
                            tst_name_mod = tst_name_mod.replace("2n1l-", "")
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][item["title"
                                ]].append(tst_data["result"]["receive-rate"].
                                          avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "PDR"]["LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "NDR"]["LOWER"])
                            else:
                                continue
                        except (TypeError, KeyError):
                            pass

    # Build the table rows: mean/stdev pairs in Mpps, then row classification.
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend(["Not tested", "Not tested"])
            else:
                item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend(["Not tested", "Not tested"])
        # Classify the row: no compare data -> skip; no reference data ->
        # new test; dot1q on 2n-skx -> footnote; otherwise relative change.
        if item[-2] == "Not tested":
            pass
        elif item[-4] == "Not tested":
            item.append("New in CSIT-1908")
        elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
            item.append("See footnote [1]")
            footnote = True
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if (len(item) == len(header)) and (item[-3] != "Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    txt_file_name = "{0}.txt".format(table["output-file"])
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    if footnote:
        with open(txt_file_name, 'a') as txt_file:
            txt_file.writelines([
                "\nFootnotes:\n",
                "[1] CSIT-1908 changed test methodology of dot1q tests in "
                "2-node testbeds, dot1q encapsulation is now used on both "
                "links of SUT.\n",
                "    Previously dot1q was used only on a single link with the "
                "other link carrying untagged Ethernet frames. This changes "
                "results\n",
                "    in slightly lower throughput in CSIT-1908 for these "
                "tests. See release notes."
            ])
478
479
480 def table_performance_comparison_nic(table, input_data):
481     """Generate the table(s) with algorithm: table_performance_comparison
482     specified in the specification file.
483
484     :param table: Table to generate.
485     :param input_data: Data to process.
486     :type table: pandas.Series
487     :type input_data: InputData
488     """
489
490     logging.info("  Generating the table {0} ...".
491                  format(table.get("title", "")))
492
493     # Transform the data
494     logging.info("    Creating the data set for the {0} '{1}'.".
495                  format(table.get("type", ""), table.get("title", "")))
496     data = input_data.filter_data(table, continue_on_error=True)
497
498     # Prepare the header of the tables
499     try:
500         header = ["Test case", ]
501
502         if table["include-tests"] == "MRR":
503             hdr_param = "Rec Rate"
504         else:
505             hdr_param = "Thput"
506
507         history = table.get("history", None)
508         if history:
509             for item in history:
510                 header.extend(
511                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
512                      "{0} Stdev [Mpps]".format(item["title"])])
513         header.extend(
514             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
515              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
516              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
517              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
518              "Delta [%]"])
519         header_str = ",".join(header) + "\n"
520     except (AttributeError, KeyError) as err:
521         logging.error("The model is invalid, missing parameter: {0}".
522                       format(err))
523         return
524
525     # Prepare data to the table:
526     tbl_dict = dict()
527     for job, builds in table["reference"]["data"].items():
528         topo = "2n-skx" if "2n-skx" in job else ""
529         for build in builds:
530             for tst_name, tst_data in data[job][str(build)].iteritems():
531                 if table["reference"]["nic"] not in tst_data["tags"]:
532                     continue
533                 tst_name_mod = _tpc_modify_test_name(tst_name)
534                 if "across topologies" in table["title"].lower():
535                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
536                 if tbl_dict.get(tst_name_mod, None) is None:
537                     name = "{0}".format("-".join(tst_data["name"].
538                                                  split("-")[:-1]))
539                     if "across testbeds" in table["title"].lower() or \
540                             "across topologies" in table["title"].lower():
541                         name = _tpc_modify_displayed_test_name(name)
542                     tbl_dict[tst_name_mod] = {"name": name,
543                                               "ref-data": list(),
544                                               "cmp-data": list()}
545                 _tpc_insert_data(target=tbl_dict[tst_name_mod]["ref-data"],
546                                  src=tst_data,
547                                  include_tests=table["include-tests"])
548
549     for job, builds in table["compare"]["data"].items():
550         for build in builds:
551             for tst_name, tst_data in data[job][str(build)].iteritems():
552                 if table["compare"]["nic"] not in tst_data["tags"]:
553                     continue
554                 tst_name_mod = _tpc_modify_test_name(tst_name)
555                 if "across topologies" in table["title"].lower():
556                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
557                 if tbl_dict.get(tst_name_mod, None) is None:
558                     name = "{0}".format("-".join(tst_data["name"].
559                                                  split("-")[:-1]))
560                     if "across testbeds" in table["title"].lower() or \
561                             "across topologies" in table["title"].lower():
562                         name = _tpc_modify_displayed_test_name(name)
563                     tbl_dict[tst_name_mod] = {"name": name,
564                                               "ref-data": list(),
565                                               "cmp-data": list()}
566                 _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
567                                  src=tst_data,
568                                  include_tests=table["include-tests"])
569
570     replacement = table["compare"].get("data-replacement", None)
571     if replacement:
572         create_new_list = True
573         rpl_data = input_data.filter_data(
574             table, data=replacement, continue_on_error=True)
575         for job, builds in replacement.items():
576             for build in builds:
577                 for tst_name, tst_data in rpl_data[job][str(build)].iteritems():
578                     if table["compare"]["nic"] not in tst_data["tags"]:
579                         continue
580                     tst_name_mod = _tpc_modify_test_name(tst_name)
581                     if "across topologies" in table["title"].lower():
582                         tst_name_mod = tst_name_mod.replace("2n1l-", "")
583                     if tbl_dict.get(tst_name_mod, None) is None:
584                         name = "{0}".format("-".join(tst_data["name"].
585                                                      split("-")[:-1]))
586                         if "across testbeds" in table["title"].lower() or \
587                                 "across topologies" in table["title"].lower():
588                             name = _tpc_modify_displayed_test_name(name)
589                         tbl_dict[tst_name_mod] = {"name": name,
590                                                   "ref-data": list(),
591                                                   "cmp-data": list()}
592                     if create_new_list:
593                         create_new_list = False
594                         tbl_dict[tst_name_mod]["cmp-data"] = list()
595
596                     _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
597                                      src=tst_data,
598                                      include_tests=table["include-tests"])
599
600     if history:
601         for item in history:
602             for job, builds in item["data"].items():
603                 for build in builds:
604                     for tst_name, tst_data in data[job][str(build)].iteritems():
605                         if item["nic"] not in tst_data["tags"]:
606                             continue
607                         tst_name_mod = _tpc_modify_test_name(tst_name)
608                         if "across topologies" in table["title"].lower():
609                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
610                         if tbl_dict.get(tst_name_mod, None) is None:
611                             continue
612                         if tbl_dict[tst_name_mod].get("history", None) is None:
613                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
614                         if tbl_dict[tst_name_mod]["history"].get(item["title"],
615                                                              None) is None:
616                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
617                                 list()
618                         try:
619                             # TODO: Re-work when NDRPDRDISC tests are not used
620                             if table["include-tests"] == "MRR":
621                                 tbl_dict[tst_name_mod]["history"][item["title"
622                                 ]].append(tst_data["result"]["receive-rate"].
623                                           avg)
624                             elif table["include-tests"] == "PDR":
625                                 if tst_data["type"] == "PDR":
626                                     tbl_dict[tst_name_mod]["history"][
627                                         item["title"]].\
628                                         append(tst_data["throughput"]["value"])
629                                 elif tst_data["type"] == "NDRPDR":
630                                     tbl_dict[tst_name_mod]["history"][item[
631                                         "title"]].append(tst_data["throughput"][
632                                         "PDR"]["LOWER"])
633                             elif table["include-tests"] == "NDR":
634                                 if tst_data["type"] == "NDR":
635                                     tbl_dict[tst_name_mod]["history"][
636                                         item["title"]].\
637                                         append(tst_data["throughput"]["value"])
638                                 elif tst_data["type"] == "NDRPDR":
639                                     tbl_dict[tst_name_mod]["history"][item[
640                                         "title"]].append(tst_data["throughput"][
641                                         "NDR"]["LOWER"])
642                             else:
643                                 continue
644                         except (TypeError, KeyError):
645                             pass
646
647     tbl_lst = list()
648     footnote = False
649     for tst_name in tbl_dict.keys():
650         item = [tbl_dict[tst_name]["name"], ]
651         if history:
652             if tbl_dict[tst_name].get("history", None) is not None:
653                 for hist_data in tbl_dict[tst_name]["history"].values():
654                     if hist_data:
655                         item.append(round(mean(hist_data) / 1000000, 2))
656                         item.append(round(stdev(hist_data) / 1000000, 2))
657                     else:
658                         item.extend(["Not tested", "Not tested"])
659             else:
660                 item.extend(["Not tested", "Not tested"])
661         data_t = tbl_dict[tst_name]["ref-data"]
662         if data_t:
663             item.append(round(mean(data_t) / 1000000, 2))
664             item.append(round(stdev(data_t) / 1000000, 2))
665         else:
666             item.extend(["Not tested", "Not tested"])
667         data_t = tbl_dict[tst_name]["cmp-data"]
668         if data_t:
669             item.append(round(mean(data_t) / 1000000, 2))
670             item.append(round(stdev(data_t) / 1000000, 2))
671         else:
672             item.extend(["Not tested", "Not tested"])
673         if item[-2] == "Not tested":
674             pass
675         elif item[-4] == "Not tested":
676             item.append("New in CSIT-1908")
677         elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
678             item.append("See footnote [1]")
679             footnote = True
680         elif item[-4] != 0:
681             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
682         if (len(item) == len(header)) and (item[-3] != "Not tested"):
683             tbl_lst.append(item)
684
685     tbl_lst = _tpc_sort_table(tbl_lst)
686
687     # Generate csv tables:
688     csv_file = "{0}.csv".format(table["output-file"])
689     with open(csv_file, "w") as file_handler:
690         file_handler.write(header_str)
691         for test in tbl_lst:
692             file_handler.write(",".join([str(item) for item in test]) + "\n")
693
694     txt_file_name = "{0}.txt".format(table["output-file"])
695     convert_csv_to_pretty_txt(csv_file, txt_file_name)
696
697     if footnote:
698         with open(txt_file_name, 'a') as txt_file:
699             txt_file.writelines([
700                 "\nFootnotes:\n",
701                 "[1] CSIT-1908 changed test methodology of dot1q tests in "
702                 "2-node testbeds, dot1q encapsulation is now used on both "
703                 "links of SUT.\n",
704                 "    Previously dot1q was used only on a single link with the "
705                 "other link carrying untagged Ethernet frames. This changes "
706                 "results\n",
707                 "    in slightly lower throughput in CSIT-1908 for these "
708                 "tests. See release notes."
709             ])
710
711
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Compares throughput (or receive rate) of the same test cases run on the
    reference NIC vs the compared NIC and writes the result as CSV and
    pretty-printed TXT files.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        hdr_param = "Rec Rate" if table["include-tests"] == "MRR" else "Thput"
        header = [
            "Test case",
            "{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
            "{0} Stdev [Mpps]".format(table["reference"]["title"]),
            "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
            "{0} Stdev [Mpps]".format(table["compare"]["title"]),
            "Delta [%]"
        ]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    # Ordered substitutions which normalize a test name: strip the test-type
    # suffix and collapse the thread/core combination to a core count only.
    substitutions = (
        ("-ndrpdrdisc", ""), ("-ndrpdr", ""), ("-pdrdisc", ""),
        ("-ndrdisc", ""), ("-pdr", ""), ("-ndr", ""),
        ("1t1c", "1c"), ("2t1c", "1c"),
        ("2t2c", "2c"), ("4t2c", "2c"),
        ("4t4c", "4c"), ("8t4c", "4c"))
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name
                for old, new in substitutions:
                    tst_name_mod = tst_name_mod.replace(old, new)
                # Drop the NIC part so results from both NICs share a key.
                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                if tst_name_mod not in tbl_dict:
                    tbl_dict[tst_name_mod] = {
                        "name": "-".join(tst_data["name"].split("-")[:-1]),
                        "ref-data": list(),
                        "cmp-data": list()
                    }
                try:
                    kind = table["include-tests"]
                    if kind == "MRR":
                        result = tst_data["result"]["receive-rate"].avg
                    elif kind == "PDR":
                        result = tst_data["throughput"]["PDR"]["LOWER"]
                    elif kind == "NDR":
                        result = tst_data["throughput"]["NDR"]["LOWER"]
                    else:
                        result = None

                    if result:
                        if table["reference"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["ref-data"].append(result)
                        elif table["compare"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    # No data in output.xml for this test
                    logging.debug("No data for {0}".format(tst_name))
                    logging.debug(repr(err))

    tbl_lst = list()
    for tst_name, tst_info in tbl_dict.iteritems():
        row = [tst_info["name"], ]
        # Mean and stdev for the reference NIC, then the compared NIC,
        # converted to Mpps; None marks a missing measurement.
        for samples in (tst_info["ref-data"], tst_info["cmp-data"]):
            if samples:
                row.append(round(mean(samples) / 1000000, 2))
                row.append(round(stdev(samples) / 1000000, 2))
            else:
                row.extend([None, None])
        # Relative change requires both means present and a non-zero base.
        if row[-4] is not None and row[-2] is not None and row[-4] != 0:
            row.append(int(relative_change(float(row[-4]), float(row[-2]))))
        # Only complete rows (with a delta) make it into the table.
        if len(row) == len(header):
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for row in tbl_lst:
            file_handler.write(",".join([str(itm) for itm in row]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
820
821
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    For every SOAK test found in the "compare" data set, the matching
    reference (NDR/PDR/MRR) result is looked up in the "reference" data set
    and the relative difference (with its standard deviation) is reported.
    The output is written as a CSV file and a pretty-printed TXT file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            "Test case",
            "{0} Thput [Mpps]".format(table["reference"]["title"]),
            "{0} Stdev [Mpps]".format(table["reference"]["title"]),
            "{0} Thput [Mpps]".format(table["compare"]["title"]),
            "{0} Stdev [Mpps]".format(table["compare"]["title"]),
            "Delta [%]", "Stdev of delta [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_data["type"] == "SOAK":
                    tst_name_mod = tst_name.replace("-soak", "")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data["parent"])
                        nic = groups.group(0) if groups else ""
                        name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                              split("-")[:-1]))
                        tbl_dict[tst_name_mod] = {
                            "name": name,
                            "ref-data": list(),
                            "cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod]["cmp-data"].append(
                            tst_data["throughput"]["LOWER"])
                    except (KeyError, TypeError):
                        # No throughput data for this soak test.
                        pass
    # Use a set for the membership tests below; in Python 2 dict.keys()
    # returns a plain list, so "in" would scan it once per test case.
    tests_lst = set(tbl_dict.keys())

    # Add corresponding NDR test results:
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdr", "").\
                    replace("-mrr", "")
                if tst_name_mod in tests_lst:
                    try:
                        if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
                            if table["include-tests"] == "MRR":
                                result = tst_data["result"]["receive-rate"].avg
                            elif table["include-tests"] == "PDR":
                                result = tst_data["throughput"]["PDR"]["LOWER"]
                            elif table["include-tests"] == "NDR":
                                result = tst_data["throughput"]["NDR"]["LOWER"]
                            else:
                                result = None
                            if result is not None:
                                tbl_dict[tst_name_mod]["ref-data"].append(
                                    result)
                    except (KeyError, TypeError):
                        continue

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        # Reference mean/stdev, converted to Mpps; None when not measured.
        data_r = tbl_dict[tst_name]["ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        # Compared (soak) mean/stdev, converted to Mpps.
        data_c = tbl_dict[tst_name]["cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # Rows without both sides measured are dropped entirely.
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
943
944
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    For each test case the trend of the receive rate is classified
    (via classify_anomalies) and the short-term and long-term relative
    changes plus regression/progression counts are written out as CSV
    and pretty-printed TXT files.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        # A trend needs at least two samples.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        # Maximum trend average in the long-term span preceding the
        # short-term window; NaN when that span holds no valid sample.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip tests with incomplete trending data. This single check
            # subsumes the former separate "both changes are NaN" test,
            # which was dead code (any-NaN implies both-NaN is covered).
            if (isnan(last_avg) or
                    isnan(rel_change_last) or
                    isnan(rel_change_long)):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Present worst cases first: most regressions, then most progressions
    # within each regression count, then the biggest short-term drop.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1063
1064
1065 def _generate_url(base, testbed, test_name):
1066     """Generate URL to a trending plot from the name of the test case.
1067
1068     :param base: The base part of URL common to all test cases.
1069     :param testbed: The testbed used for testing.
1070     :param test_name: The name of the test case.
1071     :type base: str
1072     :type testbed: str
1073     :type test_name: str
1074     :returns: The URL to the plot with the trending data for the given test
1075         case.
1076     :rtype str
1077     """
1078
1079     url = base
1080     file_name = ""
1081     anchor = ".html#"
1082     feature = ""
1083
1084     if "lbdpdk" in test_name or "lbvpp" in test_name:
1085         file_name = "link_bonding"
1086
1087     elif "114b" in test_name and "vhost" in test_name:
1088         file_name = "vts"
1089
1090     elif "testpmd" in test_name or "l3fwd" in test_name:
1091         file_name = "dpdk"
1092
1093     elif "memif" in test_name:
1094         file_name = "container_memif"
1095         feature = "-base"
1096
1097     elif "srv6" in test_name:
1098         file_name = "srv6"
1099
1100     elif "vhost" in test_name:
1101         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
1102             file_name = "vm_vhost_l2"
1103             if "114b" in test_name:
1104                 feature = ""
1105             elif "l2xcbase" in test_name and "x520" in test_name:
1106                 feature = "-base-l2xc"
1107             elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
1108                 feature = "-base-l2bd"
1109             else:
1110                 feature = "-base"
1111         elif "ip4base" in test_name:
1112             file_name = "vm_vhost_ip4"
1113             feature = "-base"
1114
1115     elif "ipsecbasetnlsw" in test_name:
1116         file_name = "ipsecsw"
1117         feature = "-base-scale"
1118
1119     elif "ipsec" in test_name:
1120         file_name = "ipsec"
1121         feature = "-base-scale"
1122         if "hw-" in test_name:
1123             file_name = "ipsechw"
1124         elif "sw-" in test_name:
1125             file_name = "ipsecsw"
1126         if "-int-" in test_name:
1127             feature = "-base-scale-int"
1128         elif "tnl" in test_name:
1129             feature = "-base-scale-tnl"
1130
1131     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
1132         file_name = "ip4_tunnels"
1133         feature = "-base"
1134
1135     elif "ip4base" in test_name or "ip4scale" in test_name:
1136         file_name = "ip4"
1137         if "xl710" in test_name:
1138             feature = "-base-scale-features"
1139         elif "iacl" in test_name:
1140             feature = "-features-iacl"
1141         elif "oacl" in test_name:
1142             feature = "-features-oacl"
1143         elif "snat" in test_name or "cop" in test_name:
1144             feature = "-features"
1145         else:
1146             feature = "-base-scale"
1147
1148     elif "ip6base" in test_name or "ip6scale" in test_name:
1149         file_name = "ip6"
1150         feature = "-base-scale"
1151
1152     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
1153             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
1154             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
1155         file_name = "l2"
1156         if "macip" in test_name:
1157             feature = "-features-macip"
1158         elif "iacl" in test_name:
1159             feature = "-features-iacl"
1160         elif "oacl" in test_name:
1161             feature = "-features-oacl"
1162         else:
1163             feature = "-base-scale"
1164
1165     if "x520" in test_name:
1166         nic = "x520-"
1167     elif "x710" in test_name:
1168         nic = "x710-"
1169     elif "xl710" in test_name:
1170         nic = "xl710-"
1171     elif "xxv710" in test_name:
1172         nic = "xxv710-"
1173     elif "vic1227" in test_name:
1174         nic = "vic1227-"
1175     elif "vic1385" in test_name:
1176         nic = "vic1385-"
1177     elif "x553" in test_name:
1178         nic = "x553-"
1179     else:
1180         nic = ""
1181     anchor += nic
1182
1183     if "64b" in test_name:
1184         framesize = "64b"
1185     elif "78b" in test_name:
1186         framesize = "78b"
1187     elif "imix" in test_name:
1188         framesize = "imix"
1189     elif "9000b" in test_name:
1190         framesize = "9000b"
1191     elif "1518b" in test_name:
1192         framesize = "1518b"
1193     elif "114b" in test_name:
1194         framesize = "114b"
1195     else:
1196         framesize = ""
1197     anchor += framesize + '-'
1198
1199     if "1t1c" in test_name:
1200         anchor += "1t1c"
1201     elif "2t2c" in test_name:
1202         anchor += "2t2c"
1203     elif "4t4c" in test_name:
1204         anchor += "4t4c"
1205     elif "2t1c" in test_name:
1206         anchor += "2t1c"
1207     elif "4t2c" in test_name:
1208         anchor += "4t2c"
1209     elif "8t4c" in test_name:
1210         anchor += "8t4c"
1211
1212     return url + file_name + '-' + testbed + '-' + nic + framesize + \
1213         feature.replace("-int", "").replace("-tnl", "") + anchor + feature
1214
1215
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the dashboard CSV produced earlier and renders it as an HTML
    table (wrapped in an rST "raw" directive) with per-row coloring and
    links to the trending plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row:
    header_row = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for col, label in enumerate(csv_lst[0]):
        cell = ET.SubElement(
            header_row, "th",
            attrib=dict(align="left" if col == 0 else "center"))
        cell.text = label

    # Data rows, with alternating shades per anomaly class:
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        # Column 4 holds the regression count, column 5 the progressions.
        if int(row[4]):
            shades = colors["regression"]
        elif int(row[5]):
            shades = colors["progression"]
        else:
            shades = colors["normal"]
        tr = ET.SubElement(
            dashboard, "tr", attrib=dict(bgcolor=shades[r_idx % 2]))

        # Columns; the first one (the test name) links to its trending plot.
        for c_idx, value in enumerate(row):
            td = ET.SubElement(
                tr, "td",
                attrib=dict(align="left" if c_idx == 0 else "center"))
            if c_idx == 0:
                link = ET.SubElement(
                    td, "a",
                    attrib=dict(href=_generate_url("../trending/",
                                                   testbed, value)))
                link.text = value
            else:
                td.text = value

    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
1292
1293
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For every specified build, writes the build number, the version and
    one line per failed test case into a plain text file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warn("    No data for the {0} '{1}'.".
                     format(table.get("type", ""), table.get("title", "")))
        return

    rows = list()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get("version", "")
            except KeyError:
                logging.error("Data for {job}: {build} is not present.".
                              format(job=job, build=build))
                return
            rows.append(build)
            rows.append(version)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_data["status"] != "FAIL":
                    continue
                # Only tests whose parent suite names a known NIC are listed.
                nic_match = re.search(REGEX_NIC, tst_data["parent"])
                if nic_match is None:
                    continue
                rows.append("{0}-{1}".format(nic_match.group(0),
                                             tst_data["name"]))

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        for line in rows:
            file_handler.write(line + '\n')
1343
1344
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Counts, per test case, the failures within a time window (default 7
    days) and reports the count together with the time, VPP build and CSIT
    build of the most recent failure. Output: CSV plus pretty TXT file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within this many days (default 7) are counted.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get("window", 7)))

    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Tests whose parent suite does not name a known NIC
                    # are skipped entirely.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    generated = input_data.metadata(job, build).\
                        get("generated", "")
                    if not generated:
                        continue
                    then = dt.strptime(generated, "%Y%m%d %H:%M")
                    # Keep only builds inside the time window; the stored
                    # tuple is (status, generated, version, build).
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name]["data"][build] = (
                            tst_data["status"],
                            generated,
                            input_data.metadata(job, build).get("version", ""),
                            build)
                except (TypeError, KeyError) as err:
                    logging.warning("tst_name: {} - err: {}".
                                    format(tst_name, repr(err)))

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        # Iterating the insertion-ordered builds, so after the loop the
        # fails_last_* variables hold the most recently added failure.
        for val in tst_data["data"].values():
            if val[0] == "FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        # fails_last_* are only bound when at least one FAIL was seen,
        # which is exactly the fails_nr truthiness guard below.
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    # Newest failures first ("%Y%m%d %H:%M" strings sort chronologically),
    # then regroup by failure count (descending); the date order is kept
    # within each group because the grouping filter is stable.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1441
1442
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV file produced earlier (table["input-file"]), renders it as
    an HTML table with alternating row colours, and writes it wrapped in a
    ``.. raw:: html`` directive to table["output-file"]. The first column of
    every data row is turned into a link to the trending page for the given
    testbed.

    :param table: Table to generate.
    :param input_data: Data to process. Unused here; kept because all table
        algorithms are invoked with the same (table, data) signature by
        generate_tables().
    :type table: pandas.Series
    :type input_data: InputData
    """

    # The testbed name is mandatory - it is embedded in every generated URL.
    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the whole source CSV into memory; header row + data rows.
    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # An empty CSV has no header row to render; bail out instead of
    # raising IndexError on csv_lst[0] below.
    if not csv_lst:
        logging.warning("The input file '{0}' is empty.".
                        format(table.get("input-file", "")))
        return

    # Table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header (first CSV row); first column left-aligned, rest centered:
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows:
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        # Alternate the row background colour for readability.
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name:
            if c_idx == 0:
                # First column is the test name - link it to trending.
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item

    # Write the table as an rST raw-html block.
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return