Trending: Also report the number of passed tests in email
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
28
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30     convert_csv_to_pretty_txt, relative_change_stdev
31
32
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
34
35
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table entry names its generator function in "algorithm"; the
    function is resolved by name in this module's namespace and invoked
    with the table specification and the input data.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        algorithm = table["algorithm"]
        try:
            # Resolve the generator by name and run it; an unknown name
            # is reported but does not stop the remaining tables.
            table_generator = eval(algorithm)
            table_generator(table, data)
        except NameError as err:
            logging.error(
                "Probably algorithm '{alg}' is not defined: {err}".format(
                    alg=algorithm, err=repr(err)))
    logging.info("Done.")
53
54
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    One CSV file is written per suite, containing the columns listed in
    the table specification for every test belonging to that suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables (CSV-quoted column titles).
    header = ['"{0}"'.format(str(column["title"]).replace('"', '""'))
              for column in table["columns"]]

    # Generate the data for the table according to the model in the table
    # specification. Only the first job/build pair is used.
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.iteritems():
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            # Substring match: the test's parent must occur in the suite name.
            if data[job][build][test]["parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table["columns"]:
                try:
                    data_key = column["data"].split(" ")[1]
                    col_data = str(data[job][build][test][data_key]).\
                        replace('"', '""')
                    if data_key in ("conf-history", "show-run"):
                        # Drop the first " |br| " marker and wrap the rest
                        # in preformatted-text markers.
                        col_data = replace(col_data, " |br| ", "",
                                           maxreplace=1)
                        col_data = " |prein| {0} |preout| ".\
                            format(col_data[:-5])
                    row_lst.append('"{0}"'.format(col_data))
                except KeyError:
                    row_lst.append("No data")
            table_lst.append(row_lst)

        # Write the data to file, one file per non-empty suite.
        if not table_lst:
            continue
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info("      Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in table_lst:
                file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
122
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Like table_detailed_test_results, but the data from all builds is
    merged into one data set before the per-suite CSV files are written.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(
        table, continue_on_error=True, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables (CSV-quoted column titles).
    header = ['"{0}"'.format(str(column["title"]).replace('"', '""'))
              for column in table["columns"]]

    for _, suite in suites.iteritems():
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            # Substring match: the test's parent must occur in the suite name.
            if data[test]["parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table["columns"]:
                try:
                    data_key = column["data"].split(" ")[1]
                    col_data = str(data[test][data_key]).replace('"', '""')
                    col_data = replace(col_data, "No Data",
                                       "Not Captured     ")
                    if data_key in ("conf-history", "show-run"):
                        # Drop the first " |br| " marker and wrap the rest
                        # in preformatted-text markers.
                        col_data = replace(col_data, " |br| ", "",
                                           maxreplace=1)
                        col_data = " |prein| {0} |preout| ".\
                            format(col_data[:-5])
                    row_lst.append('"{0}"'.format(col_data))
                except KeyError:
                    row_lst.append('"Not captured"')
            table_lst.append(row_lst)

        # Write the data to file, one file per non-empty suite.
        if not table_lst:
            continue
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info("      Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in table_lst:
                file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
189
190
191 def _tpc_modify_test_name(test_name):
192     test_name_mod = test_name.replace("-ndrpdrdisc", ""). \
193         replace("-ndrpdr", "").replace("-pdrdisc", ""). \
194         replace("-ndrdisc", "").replace("-pdr", ""). \
195         replace("-ndr", ""). \
196         replace("1t1c", "1c").replace("2t1c", "1c"). \
197         replace("2t2c", "2c").replace("4t2c", "2c"). \
198         replace("4t4c", "4c").replace("8t4c", "4c")
199     test_name_mod = re.sub(REGEX_NIC, "", test_name_mod)
200     return test_name_mod
201
202
203 def _tpc_modify_displayed_test_name(test_name):
204     return test_name.replace("1t1c", "1c").replace("2t1c", "1c"). \
205         replace("2t2c", "2c").replace("4t2c", "2c"). \
206         replace("4t4c", "4c").replace("8t4c", "4c")
207
208
209 def _tpc_insert_data(target, src, include_tests):
210     try:
211         if include_tests == "MRR":
212             target.append(src["result"]["receive-rate"].avg)
213         elif include_tests == "PDR":
214             target.append(src["throughput"]["PDR"]["LOWER"])
215         elif include_tests == "NDR":
216             target.append(src["throughput"]["NDR"]["LOWER"])
217     except (KeyError, TypeError):
218         pass
219
220
221 def _tpc_sort_table(table):
222     # Sort the table:
223     # 1. New in CSIT-XXXX
224     # 2. See footnote
225     # 3. Delta
226     tbl_new = list()
227     tbl_see = list()
228     tbl_delta = list()
229     for item in table:
230         if isinstance(item[-1], str):
231             if "New in CSIT" in item[-1]:
232                 tbl_new.append(item)
233             elif "See footnote" in item[-1]:
234                 tbl_see.append(item)
235         else:
236             tbl_delta.append(item)
237
238     # Sort the tables:
239     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
240     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
241     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
242     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
243
244     # Put the tables together:
245     table = list()
246     table.extend(tbl_new)
247     table.extend(tbl_see)
248     table.extend(tbl_delta)
249
250     return table
251
252
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Compares the mean throughput (receive rate for MRR tests) of a
    "reference" set of builds against a "compare" set, optionally with
    extra "history" columns, and writes the result as a .csv file and a
    pretty-printed .txt file (with an optional footnote appended).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables. Column titles come from the
    # "history", "reference" and "compare" sections of the specification.
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Rec Rate"
        else:
            hdr_param = "Thput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table: collect reference samples per test.
    # tbl_dict maps normalized test name -> {"name", "ref-data", "cmp-data"}.
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        # NOTE(review): "topo" is only bound inside this loop; if the
        # reference data set is empty, the later "topo" check raises
        # NameError -- confirm the reference data is never empty.
        topo = "2n-skx" if "2n-skx" in job else ""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name: NIC code (from the parent suite) plus
                    # the test name without its trailing type suffix.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                _tpc_insert_data(target=tbl_dict[tst_name_mod]["ref-data"],
                                 src=tst_data,
                                 include_tests=table["include-tests"])

    # Collect compare samples per test (same keying as above).
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
                                 src=tst_data,
                                 include_tests=table["include-tests"])

    # Optional "data-replacement": substitute compare data from another
    # data set. The first replacement sample found discards all
    # previously collected compare samples (create_new_list flag).
    replacement = table["compare"].get("data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].iteritems():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if "across topologies" in table["title"].lower():
                        tst_name_mod = tst_name_mod.replace("2n1l-", "")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = "{0}".format("-".join(tst_data["name"].
                                                     split("-")[:-1]))
                        if "across testbeds" in table["title"].lower() or \
                                "across topologies" in table["title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {"name": name,
                                                  "ref-data": list(),
                                                  "cmp-data": list()}
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod]["cmp-data"] = list()

                    _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
                                     src=tst_data,
                                     include_tests=table["include-tests"])

    # Collect history samples per test. Tests not already present in
    # tbl_dict (i.e. not in reference/compare) are skipped.
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        tst_name_mod = _tpc_modify_test_name(tst_name)
                        if "across topologies" in table["title"].lower():
                            tst_name_mod = tst_name_mod.replace("2n1l-", "")
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][item["title"
                                ]].append(tst_data["result"]["receive-rate"].
                                          avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "PDR"]["LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "NDR"]["LOWER"])
                            else:
                                continue
                        except (TypeError, KeyError):
                            pass

    # Build the output rows: name, optional history mean/stdev pairs,
    # ref mean/stdev, cmp mean/stdev, then delta or a textual marker.
    # All means/stdevs are scaled to Mpps (divided by 1e6).
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend(["Not tested", "Not tested"])
            else:
                item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend(["Not tested", "Not tested"])
        # Here item[-2] is the compare mean and item[-4] the reference
        # mean (stdevs at item[-1] and item[-3]).
        if item[-2] == "Not tested":
            pass
        elif item[-4] == "Not tested":
            item.append("New in CSIT-1908")
        elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
            item.append("See footnote [1]")
            footnote = True
        elif item[-4] != 0:
            # Delta in percent, truncated to an integer.
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only complete rows with a numeric compare mean (item[-3]
        # after the append above). Rows with a zero reference mean get no
        # delta and are dropped by the length check.
        if (len(item) == len(header)) and (item[-3] != "Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    # Convert the csv table to a pretty-printed txt table.
    txt_file_name = "{0}.txt".format(table["output-file"])
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # Append the methodology footnote when any dot1q 2n-skx row was seen.
    if footnote:
        with open(txt_file_name, 'a') as txt_file:
            txt_file.writelines([
                "\nFootnotes:\n",
                "[1] CSIT-1908 changed test methodology of dot1q tests in "
                "2-node testbeds, dot1q encapsulation is now used on both "
                "links of SUT.\n",
                "    Previously dot1q was used only on a single link with the "
                "other link carrying untagged Ethernet frames. This changes "
                "results\n",
                "    in slightly lower throughput in CSIT-1908 for these "
                "tests. See release notes."
            ])
480
481 def table_performance_comparison_nic(table, input_data):
482     """Generate the table(s) with algorithm: table_performance_comparison
483     specified in the specification file.
484
485     :param table: Table to generate.
486     :param input_data: Data to process.
487     :type table: pandas.Series
488     :type input_data: InputData
489     """
490
491     logging.info("  Generating the table {0} ...".
492                  format(table.get("title", "")))
493
494     # Transform the data
495     logging.info("    Creating the data set for the {0} '{1}'.".
496                  format(table.get("type", ""), table.get("title", "")))
497     data = input_data.filter_data(table, continue_on_error=True)
498
499     # Prepare the header of the tables
500     try:
501         header = ["Test case", ]
502
503         if table["include-tests"] == "MRR":
504             hdr_param = "Rec Rate"
505         else:
506             hdr_param = "Thput"
507
508         history = table.get("history", None)
509         if history:
510             for item in history:
511                 header.extend(
512                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
513                      "{0} Stdev [Mpps]".format(item["title"])])
514         header.extend(
515             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
516              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
517              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
518              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
519              "Delta [%]"])
520         header_str = ",".join(header) + "\n"
521     except (AttributeError, KeyError) as err:
522         logging.error("The model is invalid, missing parameter: {0}".
523                       format(err))
524         return
525
526     # Prepare data to the table:
527     tbl_dict = dict()
528     for job, builds in table["reference"]["data"].items():
529         topo = "2n-skx" if "2n-skx" in job else ""
530         for build in builds:
531             for tst_name, tst_data in data[job][str(build)].iteritems():
532                 if table["reference"]["nic"] not in tst_data["tags"]:
533                     continue
534                 tst_name_mod = _tpc_modify_test_name(tst_name)
535                 if "across topologies" in table["title"].lower():
536                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
537                 if tbl_dict.get(tst_name_mod, None) is None:
538                     name = "{0}".format("-".join(tst_data["name"].
539                                                  split("-")[:-1]))
540                     if "across testbeds" in table["title"].lower() or \
541                             "across topologies" in table["title"].lower():
542                         name = _tpc_modify_displayed_test_name(name)
543                     tbl_dict[tst_name_mod] = {"name": name,
544                                               "ref-data": list(),
545                                               "cmp-data": list()}
546                 _tpc_insert_data(target=tbl_dict[tst_name_mod]["ref-data"],
547                                  src=tst_data,
548                                  include_tests=table["include-tests"])
549
550     for job, builds in table["compare"]["data"].items():
551         for build in builds:
552             for tst_name, tst_data in data[job][str(build)].iteritems():
553                 if table["compare"]["nic"] not in tst_data["tags"]:
554                     continue
555                 tst_name_mod = _tpc_modify_test_name(tst_name)
556                 if "across topologies" in table["title"].lower():
557                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
558                 if tbl_dict.get(tst_name_mod, None) is None:
559                     name = "{0}".format("-".join(tst_data["name"].
560                                                  split("-")[:-1]))
561                     if "across testbeds" in table["title"].lower() or \
562                             "across topologies" in table["title"].lower():
563                         name = _tpc_modify_displayed_test_name(name)
564                     tbl_dict[tst_name_mod] = {"name": name,
565                                               "ref-data": list(),
566                                               "cmp-data": list()}
567                 _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
568                                  src=tst_data,
569                                  include_tests=table["include-tests"])
570
571     replacement = table["compare"].get("data-replacement", None)
572     if replacement:
573         create_new_list = True
574         rpl_data = input_data.filter_data(
575             table, data=replacement, continue_on_error=True)
576         for job, builds in replacement.items():
577             for build in builds:
578                 for tst_name, tst_data in rpl_data[job][str(build)].iteritems():
579                     if table["compare"]["nic"] not in tst_data["tags"]:
580                         continue
581                     tst_name_mod = _tpc_modify_test_name(tst_name)
582                     if "across topologies" in table["title"].lower():
583                         tst_name_mod = tst_name_mod.replace("2n1l-", "")
584                     if tbl_dict.get(tst_name_mod, None) is None:
585                         name = "{0}".format("-".join(tst_data["name"].
586                                                      split("-")[:-1]))
587                         if "across testbeds" in table["title"].lower() or \
588                                 "across topologies" in table["title"].lower():
589                             name = _tpc_modify_displayed_test_name(name)
590                         tbl_dict[tst_name_mod] = {"name": name,
591                                                   "ref-data": list(),
592                                                   "cmp-data": list()}
593                     if create_new_list:
594                         create_new_list = False
595                         tbl_dict[tst_name_mod]["cmp-data"] = list()
596
597                     _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
598                                      src=tst_data,
599                                      include_tests=table["include-tests"])
600
601     if history:
602         for item in history:
603             for job, builds in item["data"].items():
604                 for build in builds:
605                     for tst_name, tst_data in data[job][str(build)].iteritems():
606                         if item["nic"] not in tst_data["tags"]:
607                             continue
608                         tst_name_mod = _tpc_modify_test_name(tst_name)
609                         if "across topologies" in table["title"].lower():
610                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
611                         if tbl_dict.get(tst_name_mod, None) is None:
612                             continue
613                         if tbl_dict[tst_name_mod].get("history", None) is None:
614                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
615                         if tbl_dict[tst_name_mod]["history"].get(item["title"],
616                                                              None) is None:
617                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
618                                 list()
619                         try:
620                             # TODO: Re-work when NDRPDRDISC tests are not used
621                             if table["include-tests"] == "MRR":
622                                 tbl_dict[tst_name_mod]["history"][item["title"
623                                 ]].append(tst_data["result"]["receive-rate"].
624                                           avg)
625                             elif table["include-tests"] == "PDR":
626                                 if tst_data["type"] == "PDR":
627                                     tbl_dict[tst_name_mod]["history"][
628                                         item["title"]].\
629                                         append(tst_data["throughput"]["value"])
630                                 elif tst_data["type"] == "NDRPDR":
631                                     tbl_dict[tst_name_mod]["history"][item[
632                                         "title"]].append(tst_data["throughput"][
633                                         "PDR"]["LOWER"])
634                             elif table["include-tests"] == "NDR":
635                                 if tst_data["type"] == "NDR":
636                                     tbl_dict[tst_name_mod]["history"][
637                                         item["title"]].\
638                                         append(tst_data["throughput"]["value"])
639                                 elif tst_data["type"] == "NDRPDR":
640                                     tbl_dict[tst_name_mod]["history"][item[
641                                         "title"]].append(tst_data["throughput"][
642                                         "NDR"]["LOWER"])
643                             else:
644                                 continue
645                         except (TypeError, KeyError):
646                             pass
647
648     tbl_lst = list()
649     footnote = False
650     for tst_name in tbl_dict.keys():
651         item = [tbl_dict[tst_name]["name"], ]
652         if history:
653             if tbl_dict[tst_name].get("history", None) is not None:
654                 for hist_data in tbl_dict[tst_name]["history"].values():
655                     if hist_data:
656                         item.append(round(mean(hist_data) / 1000000, 2))
657                         item.append(round(stdev(hist_data) / 1000000, 2))
658                     else:
659                         item.extend(["Not tested", "Not tested"])
660             else:
661                 item.extend(["Not tested", "Not tested"])
662         data_t = tbl_dict[tst_name]["ref-data"]
663         if data_t:
664             item.append(round(mean(data_t) / 1000000, 2))
665             item.append(round(stdev(data_t) / 1000000, 2))
666         else:
667             item.extend(["Not tested", "Not tested"])
668         data_t = tbl_dict[tst_name]["cmp-data"]
669         if data_t:
670             item.append(round(mean(data_t) / 1000000, 2))
671             item.append(round(stdev(data_t) / 1000000, 2))
672         else:
673             item.extend(["Not tested", "Not tested"])
674         if item[-2] == "Not tested":
675             pass
676         elif item[-4] == "Not tested":
677             item.append("New in CSIT-1908")
678         elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
679             item.append("See footnote [1]")
680             footnote = True
681         elif item[-4] != 0:
682             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
683         if (len(item) == len(header)) and (item[-3] != "Not tested"):
684             tbl_lst.append(item)
685
686     tbl_lst = _tpc_sort_table(tbl_lst)
687
688     # Generate csv tables:
689     csv_file = "{0}.csv".format(table["output-file"])
690     with open(csv_file, "w") as file_handler:
691         file_handler.write(header_str)
692         for test in tbl_lst:
693             file_handler.write(",".join([str(item) for item in test]) + "\n")
694
695     txt_file_name = "{0}.txt".format(table["output-file"])
696     convert_csv_to_pretty_txt(csv_file, txt_file_name)
697
698     if footnote:
699         with open(txt_file_name, 'a') as txt_file:
700             txt_file.writelines([
701                 "\nFootnotes:\n",
702                 "[1] CSIT-1908 changed test methodology of dot1q tests in "
703                 "2-node testbeds, dot1q encapsulation is now used on both "
704                 "links of SUT.\n",
705                 "    Previously dot1q was used only on a single link with the "
706                 "other link carrying untagged Ethernet frames. This changes "
707                 "results\n",
708                 "    in slightly lower throughput in CSIT-1908 for these "
709                 "tests. See release notes."
710             ])
711
712
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Build the table header; MRR tests report receive rate, all other
    # test types report throughput.
    try:
        hdr_param = "Rec Rate" if table["include-tests"] == "MRR" else "Thput"
        header = [
            "Test case",
            "{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
            "{0} Stdev [Mpps]".format(table["reference"]["title"]),
            "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
            "{0} Stdev [Mpps]".format(table["compare"]["title"]),
            "Delta [%]"
        ]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Collect results keyed by the test name with test-type suffixes,
    # thread/core combinations and the NIC part stripped, so that runs of
    # the same test on different NICs land in one bucket.
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                name_mod = tst_name
                for old, new in (("-ndrpdrdisc", ""), ("-ndrpdr", ""),
                                 ("-pdrdisc", ""), ("-ndrdisc", ""),
                                 ("-pdr", ""), ("-ndr", ""),
                                 ("1t1c", "1c"), ("2t1c", "1c"),
                                 ("2t2c", "2c"), ("4t2c", "2c"),
                                 ("4t4c", "4c"), ("8t4c", "4c")):
                    name_mod = name_mod.replace(old, new)
                name_mod = re.sub(REGEX_NIC, "", name_mod)
                if tbl_dict.get(name_mod, None) is None:
                    name = "-".join(tst_data["name"].split("-")[:-1])
                    tbl_dict[name_mod] = {"name": name,
                                          "ref-data": list(),
                                          "cmp-data": list()}
                try:
                    if table["include-tests"] == "MRR":
                        result = tst_data["result"]["receive-rate"].avg
                    elif table["include-tests"] == "PDR":
                        result = tst_data["throughput"]["PDR"]["LOWER"]
                    elif table["include-tests"] == "NDR":
                        result = tst_data["throughput"]["NDR"]["LOWER"]
                    else:
                        result = None

                    if result:
                        # Route the sample to the reference or compare
                        # column according to the NIC tag of the test.
                        if table["reference"]["nic"] in tst_data["tags"]:
                            tbl_dict[name_mod]["ref-data"].append(result)
                        elif table["compare"]["nic"] in tst_data["tags"]:
                            tbl_dict[name_mod]["cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug("No data for {0}".format(tst_name))
                    logging.debug(repr(err))
                    # No data in output.xml for this test

    # Build the rows: mean and stdev (in Mpps) for both NICs, plus the
    # relative change; rows with a missing side are dropped by the final
    # length check.
    tbl_lst = list()
    for entry in tbl_dict.values():
        row = [entry["name"], ]
        for key in ("ref-data", "cmp-data"):
            samples = entry[key]
            if samples:
                row.append(round(mean(samples) / 1000000, 2))
                row.append(round(stdev(samples) / 1000000, 2))
            else:
                row.extend([None, None])
        if row[-4] is not None and row[-2] is not None and row[-4] != 0:
            row.append(int(relative_change(float(row[-4]), float(row[-2]))))
        if len(row) == len(header):
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for row in tbl_lst:
            file_handler.write(",".join(str(col) for col in row) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
822
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            "Test case",
            "{0} Thput [Mpps]".format(table["reference"]["title"]),
            "{0} Stdev [Mpps]".format(table["reference"]["title"]),
            "{0} Thput [Mpps]".format(table["compare"]["title"]),
            "{0} Stdev [Mpps]".format(table["compare"]["title"]),
            "Delta [%]", "Stdev of delta [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Create a list of available SOAK test results:
    # Keyed by the test name without the "-soak" suffix, so the entries
    # can be matched with the corresponding NDR/MRR tests below.
    tbl_dict = dict()
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_data["type"] == "SOAK":
                    tst_name_mod = tst_name.replace("-soak", "")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        # Display name: NIC (from the parent suite, empty
                        # if not recognized) + test name without its last
                        # "-"-separated component.
                        groups = re.search(REGEX_NIC, tst_data["parent"])
                        nic = groups.group(0) if groups else ""
                        name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                              split("-")[:-1]))
                        tbl_dict[tst_name_mod] = {
                            "name": name,
                            "ref-data": list(),
                            "cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod]["cmp-data"].append(
                            tst_data["throughput"]["LOWER"])
                    except (KeyError, TypeError):
                        # No usable throughput in this test run.
                        pass
    # Names of the SOAK tests collected above; used only for membership
    # tests in the next pass.
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Strip the test-type suffix so the name matches the SOAK
                # keys created above.
                tst_name_mod = tst_name.replace("-ndrpdr", "").\
                    replace("-mrr", "")
                if tst_name_mod in tests_lst:
                    try:
                        if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
                            # Pick the reference value according to the
                            # configured test kind.
                            if table["include-tests"] == "MRR":
                                result = tst_data["result"]["receive-rate"].avg
                            elif table["include-tests"] == "PDR":
                                result = tst_data["throughput"]["PDR"]["LOWER"]
                            elif table["include-tests"] == "NDR":
                                result = tst_data["throughput"]["NDR"]["LOWER"]
                            else:
                                result = None
                            if result is not None:
                                tbl_dict[tst_name_mod]["ref-data"].append(
                                    result)
                    except (KeyError, TypeError):
                        continue

    # Build the rows: mean/stdev (in Mpps) for reference and compare data,
    # then the relative change with its stdev.  Rows missing either side
    # are not appended.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        data_r = tbl_dict[tst_name]["ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name]["cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
945
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        # No NIC pattern in the parent suite name; the test
                        # cannot be labelled, skip it.
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name]["data"]
        if len(data_t) < 2:
            # At least two samples are needed to evaluate a trend.
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        # Maximum of the valid (non-NaN) long-term trend averages preceding
        # the short-term window; NaN if there are none.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last trend value vs the start of the window.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        # Long-term change: last trend value vs the long-term maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip tests with any unusable trend value.  (The original code
            # first tested "both changes are NaN", which is fully subsumed
            # by this any-NaN check, so that branch was removed.)
            if (isnan(last_avg) or
                    isnan(rel_change_last) or
                    isnan(rel_change_long)):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order rows primarily by number of regressions, then by number of
    # progressions (both descending); the name sort above keeps ties in a
    # stable, alphabetical order before the short-term-change sort.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1065
1066 def _generate_url(base, testbed, test_name):
1067     """Generate URL to a trending plot from the name of the test case.
1068
1069     :param base: The base part of URL common to all test cases.
1070     :param testbed: The testbed used for testing.
1071     :param test_name: The name of the test case.
1072     :type base: str
1073     :type testbed: str
1074     :type test_name: str
1075     :returns: The URL to the plot with the trending data for the given test
1076         case.
1077     :rtype str
1078     """
1079
1080     url = base
1081     file_name = ""
1082     anchor = ".html#"
1083     feature = ""
1084
1085     if "lbdpdk" in test_name or "lbvpp" in test_name:
1086         file_name = "link_bonding"
1087
1088     elif "114b" in test_name and "vhost" in test_name:
1089         file_name = "vts"
1090
1091     elif "testpmd" in test_name or "l3fwd" in test_name:
1092         file_name = "dpdk"
1093
1094     elif "memif" in test_name:
1095         file_name = "container_memif"
1096         feature = "-base"
1097
1098     elif "srv6" in test_name:
1099         file_name = "srv6"
1100
1101     elif "vhost" in test_name:
1102         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
1103             file_name = "vm_vhost_l2"
1104             if "114b" in test_name:
1105                 feature = ""
1106             elif "l2xcbase" in test_name and "x520" in test_name:
1107                 feature = "-base-l2xc"
1108             elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
1109                 feature = "-base-l2bd"
1110             else:
1111                 feature = "-base"
1112         elif "ip4base" in test_name:
1113             file_name = "vm_vhost_ip4"
1114             feature = "-base"
1115
1116     elif "ipsecbasetnlsw" in test_name:
1117         file_name = "ipsecsw"
1118         feature = "-base-scale"
1119
1120     elif "ipsec" in test_name:
1121         file_name = "ipsec"
1122         feature = "-base-scale"
1123         if "hw-" in test_name:
1124             file_name = "ipsechw"
1125         elif "sw-" in test_name:
1126             file_name = "ipsecsw"
1127         if "-int-" in test_name:
1128             feature = "-base-scale-int"
1129         elif "tnl" in test_name:
1130             feature = "-base-scale-tnl"
1131
1132     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
1133         file_name = "ip4_tunnels"
1134         feature = "-base"
1135
1136     elif "ip4base" in test_name or "ip4scale" in test_name:
1137         file_name = "ip4"
1138         if "xl710" in test_name:
1139             feature = "-base-scale-features"
1140         elif "iacl" in test_name:
1141             feature = "-features-iacl"
1142         elif "oacl" in test_name:
1143             feature = "-features-oacl"
1144         elif "snat" in test_name or "cop" in test_name:
1145             feature = "-features"
1146         else:
1147             feature = "-base-scale"
1148
1149     elif "ip6base" in test_name or "ip6scale" in test_name:
1150         file_name = "ip6"
1151         feature = "-base-scale"
1152
1153     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
1154             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
1155             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
1156         file_name = "l2"
1157         if "macip" in test_name:
1158             feature = "-features-macip"
1159         elif "iacl" in test_name:
1160             feature = "-features-iacl"
1161         elif "oacl" in test_name:
1162             feature = "-features-oacl"
1163         else:
1164             feature = "-base-scale"
1165
1166     if "x520" in test_name:
1167         nic = "x520-"
1168     elif "x710" in test_name:
1169         nic = "x710-"
1170     elif "xl710" in test_name:
1171         nic = "xl710-"
1172     elif "xxv710" in test_name:
1173         nic = "xxv710-"
1174     elif "vic1227" in test_name:
1175         nic = "vic1227-"
1176     elif "vic1385" in test_name:
1177         nic = "vic1385-"
1178     elif "x553" in test_name:
1179         nic = "x553-"
1180     else:
1181         nic = ""
1182     anchor += nic
1183
1184     if "64b" in test_name:
1185         framesize = "64b"
1186     elif "78b" in test_name:
1187         framesize = "78b"
1188     elif "imix" in test_name:
1189         framesize = "imix"
1190     elif "9000b" in test_name:
1191         framesize = "9000b"
1192     elif "1518b" in test_name:
1193         framesize = "1518b"
1194     elif "114b" in test_name:
1195         framesize = "114b"
1196     else:
1197         framesize = ""
1198     anchor += framesize + '-'
1199
1200     if "1t1c" in test_name:
1201         anchor += "1t1c"
1202     elif "2t2c" in test_name:
1203         anchor += "2t2c"
1204     elif "4t4c" in test_name:
1205         anchor += "4t4c"
1206     elif "2t1c" in test_name:
1207         anchor += "2t1c"
1208     elif "4t2c" in test_name:
1209         anchor += "4t2c"
1210     elif "8t4c" in test_name:
1211         anchor += "8t4c"
1212
1213     return url + file_name + '-' + testbed + '-' + nic + framesize + \
1214         feature.replace("-int", "").replace("-tnl", "") + anchor + feature
1215
1216
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the previously generated dashboard CSV (header + data rows).
    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row:
    header_row = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for col, label in enumerate(csv_lst[0]):
        cell = ET.SubElement(
            header_row, "th",
            attrib=dict(align="left" if col == 0 else "center"))
        cell.text = label

    # Data rows, background alternating between two shades per anomaly
    # class (regression / progression / normal):
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for row_nr, row in enumerate(csv_lst[1:]):
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        else:
            color = "normal"
        tr = ET.SubElement(
            dashboard, "tr", attrib=dict(bgcolor=colors[color][row_nr % 2]))

        # Columns:
        for col, value in enumerate(row):
            td = ET.SubElement(
                tr, "td", attrib=dict(align="left" if col == 0 else "center"))
            if col == 0:
                # The first column is the test name; link it to its
                # trending plot.
                url = _generate_url("../trending/", testbed, value)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = value
            else:
                td.text = value

    # Emit the table as an rST "raw html" directive.
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
1294
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warn("    No data for the {0} '{1}'.".
                     format(table.get("type", ""), table.get("title", "")))
        return

    # One record per build: build id, version, number of passed tests,
    # number of failed tests, then the names of the failed tests.
    tbl_list = list()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get("version", "")
            except KeyError:
                logging.error("Data for {job}: {build} is not present.".
                              format(job=job, build=build))
                return
            passed = 0
            failed = 0
            failed_tests = list()
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_data["status"] != "FAIL":
                    passed += 1
                    continue
                failed += 1
                groups = re.search(REGEX_NIC, tst_data["parent"])
                if not groups:
                    # No NIC pattern in the parent suite; the failed test
                    # cannot be labelled, skip it in the listing.
                    continue
                failed_tests.append(
                    "{0}-{1}".format(groups.group(0), tst_data["name"]))
            tbl_list.append(build)
            tbl_list.append(version)
            tbl_list.append(str(passed))
            tbl_list.append(str(failed))
            tbl_list.extend(failed_tests)

    # Write one item per line.
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.writelines(line + '\n' for line in tbl_list)
1353
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the last "window" days (default 7)
    # are taken into account.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get("window", 7)))

    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # The NIC label is taken from the parent suite name;
                    # tests whose parent has no NIC pattern are skipped.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    generated = input_data.metadata(job, build).\
                        get("generated", "")
                    if not generated:
                        continue
                    then = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        # Keep (status, timestamp, VPP version, build id)
                        # for each build within the time window.
                        tbl_dict[tst_name]["data"][build] = (
                            tst_data["status"],
                            generated,
                            input_data.metadata(job, build).get("version", ""),
                            build)
                except (TypeError, KeyError) as err:
                    logging.warning("tst_name: {} - err: {}".
                                    format(tst_name, repr(err)))

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        # The fails_last_* variables are read only when fails_nr > 0,
        # i.e. after at least one "FAIL" build has set them.
        for val in tst_data["data"].values():
            if val[0] == "FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    # Sort by the last-failure timestamp, newest first (the zero-padded
    # "%Y%m%d %H:%M" format makes lexicographic order chronological), ...
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    # ... then regroup by the number of failures, descending; the sort
    # above keeps the ordering inside each group stable.
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')
1446
1447     txt_file_name = "{0}.txt".format(table["output-file"])
1448     logging.info("    Writing file: '{0}'".format(txt_file_name))
1449     convert_csv_to_pretty_txt(file_name, txt_file_name)
1450
1451
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads a previously generated CSV with failed tests and renders it as an
    HTML table (wrapped in an rST ``.. raw:: html`` directive), with the test
    name in the first column linked to its trending graph.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # The testbed is required to build the trending-page URLs.
    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the whole pre-generated CSV into a list of rows.
    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = list(csv_content)
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except IOError as err:
        # A missing/unreadable input file must not abort report generation.
        logging.warning("Not possible to open the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    if not csv_lst:
        # An empty CSV would raise IndexError on csv_lst[0] below.
        logging.warning("No data in the file '{0}'.".
                        format(table["input-file"]))
        return

    # Table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header (the first CSV row):
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Data rows, with alternating background colors:
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # First column is the test name: link it to its trending graph.
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item

    # Write the HTML wrapped in an rST "raw" directive.
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return