Report: Add data
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
28
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30     convert_csv_to_pretty_txt, relative_change_stdev
31
32
# Matches the NIC portion of a test/suite name, e.g. "10ge2p1x520":
# optional speed digits, "ge", port digit, "p", digit, non-digits, digits.
REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
34
35
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table specification names its generator algorithm; the algorithm
    must be a function defined in this module.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        try:
            # Dispatch via a module-globals lookup instead of eval() so an
            # arbitrary expression in the specification file cannot be
            # executed; only plain function names defined here are accepted.
            generator = globals()[table["algorithm"]]
        except KeyError as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
            continue
        generator(table, data)
    logging.info("Done.")
53
54
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Quote the column titles and escape embedded quotes for csv.
    header = ['"{0}"'.format(str(col["title"]).replace('"', '""'))
              for col in table["columns"]]

    # The table model points to a single job/build pair.
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.iteritems():
        # Collect one csv row per test belonging to this suite.
        suite_name = suite["name"]
        rows = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] not in suite_name:
                continue
            row = list()
            for column in table["columns"]:
                try:
                    data_key = column["data"].split(" ")[1]
                    cell = str(data[job][build][test][data_key]).\
                        replace('"', '""')
                    if data_key in ("conf-history", "show-run"):
                        # Drop the first " |br| " marker and the trailing
                        # 5 characters, then wrap in pre-format markers.
                        cell = cell.replace(" |br| ", "", 1)
                        cell = " |prein| {0} |preout| ".format(cell[:-5])
                    row.append('"{0}"'.format(cell))
                except KeyError:
                    row.append("No data")
            rows.append(row)

        # Write the data to file
        if rows:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as f_out:
                f_out.write(",".join(header) + "\n")
                for row in rows:
                    f_out.write(",".join(row) + "\n")

    logging.info("  Done.")
122
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data: filter, then merge all builds into one data set.
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Quote the column titles and escape embedded quotes for csv.
    header = ['"{0}"'.format(str(col["title"]).replace('"', '""'))
              for col in table["columns"]]

    for _, suite in suites.iteritems():
        # Collect one csv row per test belonging to this suite.
        suite_name = suite["name"]
        rows = list()
        for test in data.keys():
            if data[test]["parent"] not in suite_name:
                continue
            row = list()
            for column in table["columns"]:
                try:
                    data_key = column["data"].split(" ")[1]
                    cell = str(data[test][data_key]).replace('"', '""')
                    cell = cell.replace("No Data", "Not Captured     ")
                    if data_key in ("conf-history", "show-run"):
                        # Drop the first " |br| " marker and the trailing
                        # 5 characters, then wrap in pre-format markers.
                        cell = cell.replace(" |br| ", "", 1)
                        cell = " |prein| {0} |preout| ".format(cell[:-5])
                    row.append('"{0}"'.format(cell))
                except KeyError:
                    row.append('"Not captured"')
            rows.append(row)

        # Write the data to file
        if rows:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as f_out:
                f_out.write(",".join(header) + "\n")
                for row in rows:
                    f_out.write(",".join(row) + "\n")

    logging.info("  Done.")
188
189
def _tpc_modify_test_name(test_name):
    """Normalize a test name for cross-build comparison keys.

    Strips the test-type suffixes, collapses thread/core tags to plain
    core counts and removes the NIC part matched by REGEX_NIC.

    :param test_name: Original test name.
    :type test_name: str
    :returns: Normalized test name.
    :rtype: str
    """
    # Order matters: longer suffixes must be stripped before their
    # substrings (e.g. "-ndrpdr" before "-ndr"/"-pdr").
    substitutions = (
        ("-ndrpdrdisc", ""),
        ("-ndrpdr", ""),
        ("-pdrdisc", ""),
        ("-ndrdisc", ""),
        ("-pdr", ""),
        ("-ndr", ""),
        ("1t1c", "1c"),
        ("2t1c", "1c"),
        ("2t2c", "2c"),
        ("4t2c", "2c"),
        ("4t4c", "4c"),
        ("8t4c", "4c"),
    )
    for old, new in substitutions:
        test_name = test_name.replace(old, new)
    return re.sub(REGEX_NIC, "", test_name)
200
201
202 def _tpc_modify_displayed_test_name(test_name):
203     return test_name.replace("1t1c", "1c").replace("2t1c", "1c"). \
204         replace("2t2c", "2c").replace("4t2c", "2c"). \
205         replace("4t4c", "4c").replace("8t4c", "4c")
206
207
208 def _tpc_insert_data(target, src, include_tests):
209     try:
210         if include_tests == "MRR":
211             target.append(src["result"]["receive-rate"].avg)
212         elif include_tests == "PDR":
213             target.append(src["throughput"]["PDR"]["LOWER"])
214         elif include_tests == "NDR":
215             target.append(src["throughput"]["NDR"]["LOWER"])
216     except (KeyError, TypeError):
217         pass
218
219
220 def _tpc_sort_table(table):
221     # Sort the table:
222     # 1. New in CSIT-XXXX
223     # 2. See footnote
224     # 3. Delta
225     tbl_new = list()
226     tbl_see = list()
227     tbl_delta = list()
228     for item in table:
229         if isinstance(item[-1], str):
230             if "New in CSIT" in item[-1]:
231                 tbl_new.append(item)
232             elif "See footnote" in item[-1]:
233                 tbl_see.append(item)
234         else:
235             tbl_delta.append(item)
236
237     # Sort the tables:
238     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
239     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
240     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
241     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
242
243     # Put the tables together:
244     table = list()
245     table.extend(tbl_new)
246     table.extend(tbl_see)
247     table.extend(tbl_delta)
248
249     return table
250
251
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Builds a csv/txt table comparing "reference" and "compare" data sets
    (optionally with extra "history" columns), including mean, stdev and
    relative delta per test.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        # MRR tables report receive rate; NDR/PDR tables report throughput.
        if table["include-tests"] == "MRR":
            hdr_param = "Rec Rate"
        else:
            hdr_param = "Thput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    # tbl_dict maps normalized test name -> {"name", "ref-data", "cmp-data"}.
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        # NOTE(review): 'topo' deliberately leaks out of this loop and is
        # read in the footnote check below; if the reference data mapping
        # were empty it would be undefined there — confirm specs always
        # provide at least one reference job.
        topo = "2n-skx" if "2n-skx" in job else ""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name keeps the NIC prefix from the parent
                    # suite, the normalized key does not.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                _tpc_insert_data(target=tbl_dict[tst_name_mod]["ref-data"],
                                 src=tst_data,
                                 include_tests=table["include-tests"])

    # Collect the "compare" measurements; same keying as above.
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
                                 src=tst_data,
                                 include_tests=table["include-tests"])

    # Optional replacement data set: the first matching test discards the
    # previously collected cmp-data (create_new_list) and refills it.
    replacement = table["compare"].get("data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].iteritems():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if "across topologies" in table["title"].lower():
                        tst_name_mod = tst_name_mod.replace("2n1l-", "")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = "{0}".format("-".join(tst_data["name"].
                                                     split("-")[:-1]))
                        if "across testbeds" in table["title"].lower() or \
                                "across topologies" in table["title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {"name": name,
                                                  "ref-data": list(),
                                                  "cmp-data": list()}
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod]["cmp-data"] = list()

                    _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
                                     src=tst_data,
                                     include_tests=table["include-tests"])

    # Collect historical measurements per history item; only tests already
    # present in tbl_dict are considered.
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        tst_name_mod = _tpc_modify_test_name(tst_name)
                        if "across topologies" in table["title"].lower():
                            tst_name_mod = tst_name_mod.replace("2n1l-", "")
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][item["title"
                                ]].append(tst_data["result"]["receive-rate"].
                                          avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "PDR"]["LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "NDR"]["LOWER"])
                            else:
                                continue
                        except (TypeError, KeyError):
                            pass

    # Build the output rows: [name, (history mean/stdev)*, ref mean,
    # ref stdev, cmp mean, cmp stdev, delta-or-remark].
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend(["Not tested", "Not tested"])
            else:
                item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend(["Not tested", "Not tested"])
        # At this point item[-2] is the compare mean and item[-4] is the
        # reference mean (see the two appends above).
        if item[-2] == "Not tested":
            pass
        elif item[-4] == "Not tested":
            item.append("New in CSIT-1908")
        elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
            item.append("See footnote [1]")
            footnote = True
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only complete rows (a remark/delta was appended) where the
        # compare column is present.
        if (len(item) == len(header)) and (item[-3] != "Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    txt_file_name = "{0}.txt".format(table["output-file"])
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # Append the methodology footnote when any dot1q row referenced it.
    if footnote:
        with open(txt_file_name, 'a') as txt_file:
            txt_file.writelines([
                "\nFootnotes:\n",
                "[1] CSIT-1908 changed test methodology of dot1q tests in "
                "2-node testbeds, dot1q encapsulation is now used on both "
                "links of SUT.\n",
                "    Previously dot1q was used only on a single link with the "
                "other link carrying untagged Ethernet frames. This changes "
                "results\n",
                "    in slightly lower throughput in CSIT-1908 for these "
                "tests. See release notes."
            ])
479
480 def table_performance_comparison_nic(table, input_data):
481     """Generate the table(s) with algorithm: table_performance_comparison
482     specified in the specification file.
483
484     :param table: Table to generate.
485     :param input_data: Data to process.
486     :type table: pandas.Series
487     :type input_data: InputData
488     """
489
490     logging.info("  Generating the table {0} ...".
491                  format(table.get("title", "")))
492
493     # Transform the data
494     logging.info("    Creating the data set for the {0} '{1}'.".
495                  format(table.get("type", ""), table.get("title", "")))
496     data = input_data.filter_data(table, continue_on_error=True)
497
498     # Prepare the header of the tables
499     try:
500         header = ["Test case", ]
501
502         if table["include-tests"] == "MRR":
503             hdr_param = "Rec Rate"
504         else:
505             hdr_param = "Thput"
506
507         history = table.get("history", None)
508         if history:
509             for item in history:
510                 header.extend(
511                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
512                      "{0} Stdev [Mpps]".format(item["title"])])
513         header.extend(
514             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
515              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
516              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
517              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
518              "Delta [%]"])
519         header_str = ",".join(header) + "\n"
520     except (AttributeError, KeyError) as err:
521         logging.error("The model is invalid, missing parameter: {0}".
522                       format(err))
523         return
524
525     # Prepare data to the table:
526     tbl_dict = dict()
527     for job, builds in table["reference"]["data"].items():
528         topo = "2n-skx" if "2n-skx" in job else ""
529         for build in builds:
530             for tst_name, tst_data in data[job][str(build)].iteritems():
531                 if table["reference"]["nic"] not in tst_data["tags"]:
532                     continue
533                 tst_name_mod = _tpc_modify_test_name(tst_name)
534                 if "across topologies" in table["title"].lower():
535                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
536                 if tbl_dict.get(tst_name_mod, None) is None:
537                     name = "{0}".format("-".join(tst_data["name"].
538                                                  split("-")[:-1]))
539                     if "across testbeds" in table["title"].lower() or \
540                             "across topologies" in table["title"].lower():
541                         name = _tpc_modify_displayed_test_name(name)
542                     tbl_dict[tst_name_mod] = {"name": name,
543                                               "ref-data": list(),
544                                               "cmp-data": list()}
545                 _tpc_insert_data(target=tbl_dict[tst_name_mod]["ref-data"],
546                                  src=tst_data,
547                                  include_tests=table["include-tests"])
548
549     for job, builds in table["compare"]["data"].items():
550         for build in builds:
551             for tst_name, tst_data in data[job][str(build)].iteritems():
552                 if table["compare"]["nic"] not in tst_data["tags"]:
553                     continue
554                 tst_name_mod = _tpc_modify_test_name(tst_name)
555                 if "across topologies" in table["title"].lower():
556                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
557                 if tbl_dict.get(tst_name_mod, None) is None:
558                     name = "{0}".format("-".join(tst_data["name"].
559                                                  split("-")[:-1]))
560                     if "across testbeds" in table["title"].lower() or \
561                             "across topologies" in table["title"].lower():
562                         name = _tpc_modify_displayed_test_name(name)
563                     tbl_dict[tst_name_mod] = {"name": name,
564                                               "ref-data": list(),
565                                               "cmp-data": list()}
566                 _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
567                                  src=tst_data,
568                                  include_tests=table["include-tests"])
569
570     replacement = table["compare"].get("data-replacement", None)
571     if replacement:
572         create_new_list = True
573         rpl_data = input_data.filter_data(
574             table, data=replacement, continue_on_error=True)
575         for job, builds in replacement.items():
576             for build in builds:
577                 for tst_name, tst_data in rpl_data[job][str(build)].iteritems():
578                     if table["compare"]["nic"] not in tst_data["tags"]:
579                         continue
580                     tst_name_mod = _tpc_modify_test_name(tst_name)
581                     if "across topologies" in table["title"].lower():
582                         tst_name_mod = tst_name_mod.replace("2n1l-", "")
583                     if tbl_dict.get(tst_name_mod, None) is None:
584                         name = "{0}".format("-".join(tst_data["name"].
585                                                      split("-")[:-1]))
586                         if "across testbeds" in table["title"].lower() or \
587                                 "across topologies" in table["title"].lower():
588                             name = _tpc_modify_displayed_test_name(name)
589                         tbl_dict[tst_name_mod] = {"name": name,
590                                                   "ref-data": list(),
591                                                   "cmp-data": list()}
592                     if create_new_list:
593                         create_new_list = False
594                         tbl_dict[tst_name_mod]["cmp-data"] = list()
595
596                     _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
597                                      src=tst_data,
598                                      include_tests=table["include-tests"])
599
600     if history:
601         for item in history:
602             for job, builds in item["data"].items():
603                 for build in builds:
604                     for tst_name, tst_data in data[job][str(build)].iteritems():
605                         if item["nic"] not in tst_data["tags"]:
606                             continue
607                         tst_name_mod = _tpc_modify_test_name(tst_name)
608                         if "across topologies" in table["title"].lower():
609                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
610                         if tbl_dict.get(tst_name_mod, None) is None:
611                             continue
612                         if tbl_dict[tst_name_mod].get("history", None) is None:
613                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
614                         if tbl_dict[tst_name_mod]["history"].get(item["title"],
615                                                              None) is None:
616                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
617                                 list()
618                         try:
619                             # TODO: Re-work when NDRPDRDISC tests are not used
620                             if table["include-tests"] == "MRR":
621                                 tbl_dict[tst_name_mod]["history"][item["title"
622                                 ]].append(tst_data["result"]["receive-rate"].
623                                           avg)
624                             elif table["include-tests"] == "PDR":
625                                 if tst_data["type"] == "PDR":
626                                     tbl_dict[tst_name_mod]["history"][
627                                         item["title"]].\
628                                         append(tst_data["throughput"]["value"])
629                                 elif tst_data["type"] == "NDRPDR":
630                                     tbl_dict[tst_name_mod]["history"][item[
631                                         "title"]].append(tst_data["throughput"][
632                                         "PDR"]["LOWER"])
633                             elif table["include-tests"] == "NDR":
634                                 if tst_data["type"] == "NDR":
635                                     tbl_dict[tst_name_mod]["history"][
636                                         item["title"]].\
637                                         append(tst_data["throughput"]["value"])
638                                 elif tst_data["type"] == "NDRPDR":
639                                     tbl_dict[tst_name_mod]["history"][item[
640                                         "title"]].append(tst_data["throughput"][
641                                         "NDR"]["LOWER"])
642                             else:
643                                 continue
644                         except (TypeError, KeyError):
645                             pass
646
647     tbl_lst = list()
648     footnote = False
649     for tst_name in tbl_dict.keys():
650         item = [tbl_dict[tst_name]["name"], ]
651         if history:
652             if tbl_dict[tst_name].get("history", None) is not None:
653                 for hist_data in tbl_dict[tst_name]["history"].values():
654                     if hist_data:
655                         item.append(round(mean(hist_data) / 1000000, 2))
656                         item.append(round(stdev(hist_data) / 1000000, 2))
657                     else:
658                         item.extend(["Not tested", "Not tested"])
659             else:
660                 item.extend(["Not tested", "Not tested"])
661         data_t = tbl_dict[tst_name]["ref-data"]
662         if data_t:
663             item.append(round(mean(data_t) / 1000000, 2))
664             item.append(round(stdev(data_t) / 1000000, 2))
665         else:
666             item.extend(["Not tested", "Not tested"])
667         data_t = tbl_dict[tst_name]["cmp-data"]
668         if data_t:
669             item.append(round(mean(data_t) / 1000000, 2))
670             item.append(round(stdev(data_t) / 1000000, 2))
671         else:
672             item.extend(["Not tested", "Not tested"])
673         if item[-2] == "Not tested":
674             pass
675         elif item[-4] == "Not tested":
676             item.append("New in CSIT-1908")
677         elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
678             item.append("See footnote [1]")
679             footnote = True
680         elif item[-4] != 0:
681             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
682         if (len(item) == len(header)) and (item[-3] != "Not tested"):
683             tbl_lst.append(item)
684
685     tbl_lst = _tpc_sort_table(tbl_lst)
686
687     # Generate csv tables:
688     csv_file = "{0}.csv".format(table["output-file"])
689     with open(csv_file, "w") as file_handler:
690         file_handler.write(header_str)
691         for test in tbl_lst:
692             file_handler.write(",".join([str(item) for item in test]) + "\n")
693
694     txt_file_name = "{0}.txt".format(table["output-file"])
695     convert_csv_to_pretty_txt(csv_file, txt_file_name)
696
697     if footnote:
698         with open(txt_file_name, 'a') as txt_file:
699             txt_file.writelines([
700                 "\nFootnotes:\n",
701                 "[1] CSIT-1908 changed test methodology of dot1q tests in "
702                 "2-node testbeds, dot1q encapsulation is now used on both "
703                 "links of SUT.\n",
704                 "    Previously dot1q was used only on a single link with the "
705                 "other link carrying untagged Ethernet frames. This changes "
706                 "results\n",
707                 "    in slightly lower throughput in CSIT-1908 for these "
708                 "tests. See release notes."
709             ])
710
711
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    The table compares results of the same tests run with two different NICs
    ("reference" vs "compare") and reports the relative change in percent.
    The output is written as a csv file and a pretty-printed txt file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        # MRR tests report a receive rate, NDR/PDR tests a throughput.
        if table["include-tests"] == "MRR":
            hdr_param = "Rec Rate"
        else:
            hdr_param = "Thput"

        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Normalize the test name: strip the test-type suffix and
                # collapse the thread/core combinations so that the same
                # test run with different settings maps to one key.
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                # Drop the NIC token from the key as well; it differs
                # between the two compared test runs.
                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = "-".join(tst_data["name"].split("-")[:-1])
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    if table["include-tests"] == "MRR":
                        result = tst_data["result"]["receive-rate"].avg
                    elif table["include-tests"] == "PDR":
                        result = tst_data["throughput"]["PDR"]["LOWER"]
                    elif table["include-tests"] == "NDR":
                        result = tst_data["throughput"]["NDR"]["LOWER"]
                    else:
                        result = None

                    # The NIC tag decides whether the sample belongs to the
                    # reference or to the compare column.
                    if result:
                        if table["reference"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["ref-data"].append(result)
                        elif table["compare"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug("No data for {0}".format(tst_name))
                    logging.debug(repr(err))
                    # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            # Mean and stdev reported in Mpps, rounded to two decimals.
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # Delta [%] only when both means exist and the reference mean is
        # non-zero (avoids division by zero in relative_change).
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Incomplete rows (missing data or missing delta) are dropped here.
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
820
821
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    The table compares the critical rate found by SOAK tests ("compare")
    with the rate of the corresponding NDRPDR / MRR tests ("reference") and
    reports the relative change together with its standard deviation.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            "Test case",
            "{0} Thput [Mpps]".format(table["reference"]["title"]),
            "{0} Stdev [Mpps]".format(table["reference"]["title"]),
            "{0} Thput [Mpps]".format(table["compare"]["title"]),
            "{0} Stdev [Mpps]".format(table["compare"]["title"]),
            "Delta [%]", "Stdev of delta [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_data["type"] == "SOAK":
                    # The "-soak" suffix is stripped so the key matches the
                    # corresponding reference (ndrpdr / mrr) test name.
                    tst_name_mod = tst_name.replace("-soak", "")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data["parent"])
                        nic = groups.group(0) if groups else ""
                        name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                              split("-")[:-1]))
                        tbl_dict[tst_name_mod] = {
                            "name": name,
                            "ref-data": list(),
                            "cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod]["cmp-data"].append(
                            tst_data["throughput"]["LOWER"])
                    except (KeyError, TypeError):
                        # No throughput in output.xml for this test.
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdr", "").\
                    replace("-mrr", "")
                # Only reference results which have a SOAK counterpart are
                # collected.
                if tst_name_mod in tests_lst:
                    try:
                        if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
                            if table["include-tests"] == "MRR":
                                result = tst_data["result"]["receive-rate"].avg
                            elif table["include-tests"] == "PDR":
                                result = tst_data["throughput"]["PDR"]["LOWER"]
                            elif table["include-tests"] == "NDR":
                                result = tst_data["throughput"]["NDR"]["LOWER"]
                            else:
                                result = None
                            if result is not None:
                                tbl_dict[tst_name_mod]["ref-data"].append(
                                    result)
                    except (KeyError, TypeError):
                        continue

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        data_r = tbl_dict[tst_name]["ref-data"]
        if data_r:
            # Mean and stdev reported in Mpps, rounded to two decimals.
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name]["cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # The row is kept only when both sides have data (also guards the
        # division inside relative_change_stdev).
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
943
944
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    For each test the trend (last average), the short- and long-term
    relative changes and the numbers of regressions / progressions within
    the evaluation window are computed and written to csv and txt files.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # The displayed name is prefixed with the NIC; tests
                    # without a recognizable NIC in the parent suite name
                    # are skipped.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        # At least two samples are needed to evaluate a trend.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        # Maximum of the trend averages in the long-term part of the window
        # which precedes the short-term window; nan when there is none.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last average vs the average one window ago.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        # Long-term change: last average vs the long-term maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip tests with incomplete data. This single test subsumes
            # the former additional check for both relative changes being
            # nan, which was removed here as redundant dead code.
            if (isnan(last_avg) or
                    isnan(rel_change_last) or
                    isnan(rel_change_long)):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    # Pre-sort by name so ties in the bucket sort below stay deterministic.
    tbl_lst.sort(key=lambda rel: rel[0])

    # Bucket sort: by number of regressions (descending), then number of
    # progressions (descending), then by the short-term change (ascending).
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1063
1064
1065 def _generate_url(base, testbed, test_name):
1066     """Generate URL to a trending plot from the name of the test case.
1067
1068     :param base: The base part of URL common to all test cases.
1069     :param testbed: The testbed used for testing.
1070     :param test_name: The name of the test case.
1071     :type base: str
1072     :type testbed: str
1073     :type test_name: str
1074     :returns: The URL to the plot with the trending data for the given test
1075         case.
1076     :rtype str
1077     """
1078
1079     url = base
1080     file_name = ""
1081     anchor = ".html#"
1082     feature = ""
1083
1084     if "lbdpdk" in test_name or "lbvpp" in test_name:
1085         file_name = "link_bonding"
1086
1087     elif "114b" in test_name and "vhost" in test_name:
1088         file_name = "vts"
1089
1090     elif "testpmd" in test_name or "l3fwd" in test_name:
1091         file_name = "dpdk"
1092
1093     elif "memif" in test_name:
1094         file_name = "container_memif"
1095         feature = "-base"
1096
1097     elif "srv6" in test_name:
1098         file_name = "srv6"
1099
1100     elif "vhost" in test_name:
1101         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
1102             file_name = "vm_vhost_l2"
1103             if "114b" in test_name:
1104                 feature = ""
1105             elif "l2xcbase" in test_name and "x520" in test_name:
1106                 feature = "-base-l2xc"
1107             elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
1108                 feature = "-base-l2bd"
1109             else:
1110                 feature = "-base"
1111         elif "ip4base" in test_name:
1112             file_name = "vm_vhost_ip4"
1113             feature = "-base"
1114
1115     elif "ipsecbasetnlsw" in test_name:
1116         file_name = "ipsecsw"
1117         feature = "-base-scale"
1118
1119     elif "ipsec" in test_name:
1120         file_name = "ipsec"
1121         feature = "-base-scale"
1122         if "hw-" in test_name:
1123             file_name = "ipsechw"
1124         elif "sw-" in test_name:
1125             file_name = "ipsecsw"
1126
1127     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
1128         file_name = "ip4_tunnels"
1129         feature = "-base"
1130
1131     elif "ip4base" in test_name or "ip4scale" in test_name:
1132         file_name = "ip4"
1133         if "xl710" in test_name:
1134             feature = "-base-scale-features"
1135         elif "iacl" in test_name:
1136             feature = "-features-iacl"
1137         elif "oacl" in test_name:
1138             feature = "-features-oacl"
1139         elif "snat" in test_name or "cop" in test_name:
1140             feature = "-features"
1141         else:
1142             feature = "-base-scale"
1143
1144     elif "ip6base" in test_name or "ip6scale" in test_name:
1145         file_name = "ip6"
1146         feature = "-base-scale"
1147
1148     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
1149             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
1150             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
1151         file_name = "l2"
1152         if "macip" in test_name:
1153             feature = "-features-macip"
1154         elif "iacl" in test_name:
1155             feature = "-features-iacl"
1156         elif "oacl" in test_name:
1157             feature = "-features-oacl"
1158         else:
1159             feature = "-base-scale"
1160
1161     if "x520" in test_name:
1162         nic = "x520-"
1163     elif "x710" in test_name:
1164         nic = "x710-"
1165     elif "xl710" in test_name:
1166         nic = "xl710-"
1167     elif "xxv710" in test_name:
1168         nic = "xxv710-"
1169     elif "vic1227" in test_name:
1170         nic = "vic1227-"
1171     elif "vic1385" in test_name:
1172         nic = "vic1385-"
1173     else:
1174         nic = ""
1175     anchor += nic
1176
1177     if "64b" in test_name:
1178         framesize = "64b"
1179     elif "78b" in test_name:
1180         framesize = "78b"
1181     elif "imix" in test_name:
1182         framesize = "imix"
1183     elif "9000b" in test_name:
1184         framesize = "9000b"
1185     elif "1518b" in test_name:
1186         framesize = "1518b"
1187     elif "114b" in test_name:
1188         framesize = "114b"
1189     else:
1190         framesize = ""
1191     anchor += framesize + '-'
1192
1193     if "1t1c" in test_name:
1194         anchor += "1t1c"
1195     elif "2t2c" in test_name:
1196         anchor += "2t2c"
1197     elif "4t4c" in test_name:
1198         anchor += "4t4c"
1199     elif "2t1c" in test_name:
1200         anchor += "2t1c"
1201     elif "4t2c" in test_name:
1202         anchor += "4t2c"
1203     elif "8t4c" in test_name:
1204         anchor += "8t4c"
1205
1206     return url + file_name + '-' + testbed + '-' + nic + framesize + \
1207         feature.replace("-int", "").replace("-tnl", "") + anchor + feature
1208
1209
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the source csv; the first row is the header, the rest are the
    # dashboard rows.
    try:
        with open(table["input-file"], 'rb') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Build the html table element:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row:
    header_row = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for col_idx, title in enumerate(rows[0]):
        cell = ET.SubElement(
            header_row, "th",
            attrib=dict(align="left" if col_idx == 0 else "center"))
        cell.text = title

    # Data rows; two shades per colour class alternate so the rows stripe.
    palette = {"regression": ("#ffcccc", "#ff9999"),
               "progression": ("#c6ecc6", "#9fdf9f"),
               "normal": ("#e9f1fb", "#d4e4f7")}
    for row_idx, row in enumerate(rows[1:]):
        # Any regression (column 4) wins over any progression (column 5).
        if int(row[4]):
            shade = "regression"
        elif int(row[5]):
            shade = "progression"
        else:
            shade = "normal"
        table_row = ET.SubElement(
            dashboard, "tr",
            attrib=dict(bgcolor=palette[shade][row_idx % 2]))

        # Cells; the first one (test name) is a link to the trending graph.
        for col_idx, value in enumerate(row):
            cell = ET.SubElement(
                table_row, "td",
                attrib=dict(align="left" if col_idx == 0 else "center"))
            if col_idx == 0:
                link = ET.SubElement(
                    cell, "a",
                    attrib=dict(href=_generate_url("../trending/",
                                                   testbed, value)))
                link.text = value
            else:
                cell.text = value

    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
1286
1287
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warn("    No data for the {0} '{1}'.".
                     format(table.get("type", ""), table.get("title", "")))
        return

    # Per build: the build number, the version and then one line per failed
    # test (name prefixed with the NIC found in the parent suite name).
    lines = list()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get("version", "")
            except KeyError:
                logging.error("Data for {job}: {build} is not present.".
                              format(job=job, build=build))
                return
            lines.append(build)
            lines.append(version)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_data["status"] != "FAIL":
                    continue
                # Tests without a recognizable NIC token are skipped.
                groups = re.search(REGEX_NIC, tst_data["parent"])
                if not groups:
                    continue
                lines.append("{0}-{1}".format(groups.group(0),
                                              tst_data["name"]))

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        for line in lines:
            file_handler.write(line + '\n')
1337
1338
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    For each test the number of failures within the evaluation window is
    counted together with the time, VPP build and CSIT build of the last
    failure. The result is written to csv and txt files.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the last "window" days (default: 7) are
    # taken into account.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get("window", 7)))

    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Tests without a recognizable NIC in the parent suite
                    # name are skipped.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    generated = input_data.metadata(job, build).\
                        get("generated", "")
                    if not generated:
                        continue
                    then = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        # Keep (status, generated timestamp, vpp version,
                        # csit build) for every build within the window.
                        tbl_dict[tst_name]["data"][build] = (
                            tst_data["status"],
                            generated,
                            input_data.metadata(job, build).get("version", ""),
                            build)
                except (TypeError, KeyError) as err:
                    logging.warning("tst_name: {} - err: {}".
                                    format(tst_name, repr(err)))

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        # Count failures; the last "FAIL" seen during iteration supplies
        # the date/version columns (relies on the builds being inserted in
        # chronological order -- OrderedDict preserves insertion order).
        for val in tst_data["data"].values():
            if val[0] == "FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    # Sort by the date of the last failure (newest first), then regroup by
    # the number of failures (descending); the date order is kept within
    # each group.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1435
1436
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV produced by the "failed tests" table and renders it as an
    HTML table (wrapped in an rST ``.. raw:: html`` directive) with the test
    names linked to the trending pages of the given testbed.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # The testbed is needed to build the trending URLs; without it the links
    # would be wrong, so do not generate the table at all.
    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the input CSV; each row becomes one row of the HTML table.
    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except IOError as err:
        # The input file may be missing, e.g. if the step generating the CSV
        # failed; warn and skip instead of crashing the whole generation.
        logging.warning("Not possible to open the file '{0}'.\n{1}".
                        format(table["input-file"], repr(err)))
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    if not csv_lst:
        # An empty CSV has no header row; csv_lst[0] below would raise
        # IndexError.
        logging.warning("No data in the input file '{0}'.".
                        format(table["input-file"]))
        return

    # Table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header (first CSV row); first column is left-aligned (test name),
    # the rest centered:
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows (remaining CSV rows), with alternating background colors:
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name (first column) is a link to the trending graphs:
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item

    # Write the table as raw HTML embedded in an rST file:
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return