Add: Use containers for shared TG
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
28
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30     convert_csv_to_pretty_txt, relative_change_stdev
31
32
# Matches a NIC prefix embedded in test/suite names, e.g. "10ge2p1x520":
# optional speed digits, "ge", port count digit, "p", digit, model letters,
# optional trailing digits.
REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
34
35
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table specification names its generator function (the
    "algorithm"); the function is looked up in this module's globals and
    called with the table specification and the input data. A table whose
    algorithm is not defined is logged and skipped; the remaining tables
    are still generated.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        try:
            # Resolve the generator by name instead of eval(): this avoids
            # evaluating arbitrary expressions from the specification file
            # and no longer mis-catches a NameError raised *inside* a
            # correctly resolved algorithm.
            table_generator = globals()[table["algorithm"]]
        except KeyError as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
            continue
        table_generator(table, data)
    logging.info("Done.")
53
54
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    Writes one CSV file per suite that has matching tests; the file name
    is composed of the table's "output-file", the suite name and
    "output-file-ext".

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables: CSV-quote each column title
    # (embedded double quotes are doubled).
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification.
    # NOTE(review): only the first job and its first build from
    # table["data"] are used here -- confirm this is intended for the
    # detailed-results table.
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.iteritems():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            # Substring test: the test's "parent" is expected to be
            # contained in the suite name.
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # column["data"] looks like "data <key>"; the key
                        # part selects the value from the test record.
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("conf-history",
                                                            "show-run"):
                            # Drop the first " |br| " marker and wrap the
                            # text in |prein| / |preout|; the last five
                            # characters are cut off (presumably a trailing
                            # "|br| " remnant -- TODO confirm).
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file; suites without matching tests produce
        # no file at all.
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
121
122
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Unlike table_details, the data from all jobs/builds is merged into a
    single data set before the per-suite CSV files are written.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data: filter, merge across jobs/builds, then sort.
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(
        table, continue_on_error=True, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables: CSV-quote each column title.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.iteritems():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            # Substring test: the test's "parent" is expected to be
            # contained in the suite name.
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # column["data"] looks like "data <key>"; the key
                        # part selects the value from the test record.
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        col_data = replace(col_data, "No Data",
                                           "Not Captured     ")
                        if column["data"].split(" ")[1] in ("conf-history",
                                                            "show-run"):
                            # Drop the first " |br| " marker, wrap the text
                            # in |prein| / |preout| and cut the last five
                            # characters (trailing markup remnant,
                            # presumably -- TODO confirm).
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append('"Not captured"')
                table_lst.append(row_lst)

        # Write the data to file; suites without matching tests produce
        # no file at all.
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
189
190
def _tpc_modify_test_name(test_name):
    """Normalize a test name for use as a comparison-table key.

    Strips the test-type suffixes, collapses the thread/core combination
    to the core count only (e.g. "2t1c" -> "1c") and removes the NIC
    prefix matched by REGEX_NIC.
    """
    suffixes = ("-ndrpdrdisc", "-ndrpdr", "-pdrdisc",
                "-ndrdisc", "-pdr", "-ndr")
    core_map = (("1t1c", "1c"), ("2t1c", "1c"),
                ("2t2c", "2c"), ("4t2c", "2c"),
                ("4t4c", "4c"), ("8t4c", "4c"))
    modified = test_name
    for suffix in suffixes:
        modified = modified.replace(suffix, "")
    for threads, cores in core_map:
        modified = modified.replace(threads, cores)
    return re.sub(REGEX_NIC, "", modified)
201
202
203 def _tpc_modify_displayed_test_name(test_name):
204     return test_name.replace("1t1c", "1c").replace("2t1c", "1c"). \
205         replace("2t2c", "2c").replace("4t2c", "2c"). \
206         replace("4t4c", "4c").replace("8t4c", "4c")
207
208
209 def _tpc_insert_data(target, src, include_tests):
210     try:
211         if include_tests == "MRR":
212             target.append(src["result"]["receive-rate"].avg)
213         elif include_tests == "PDR":
214             target.append(src["throughput"]["PDR"]["LOWER"])
215         elif include_tests == "NDR":
216             target.append(src["throughput"]["NDR"]["LOWER"])
217     except (KeyError, TypeError):
218         pass
219
220
221 def _tpc_sort_table(table):
222     # Sort the table:
223     # 1. New in CSIT-XXXX
224     # 2. See footnote
225     # 3. Delta
226     tbl_new = list()
227     tbl_see = list()
228     tbl_delta = list()
229     for item in table:
230         if isinstance(item[-1], str):
231             if "New in CSIT" in item[-1]:
232                 tbl_new.append(item)
233             elif "See footnote" in item[-1]:
234                 tbl_see.append(item)
235         else:
236             tbl_delta.append(item)
237
238     # Sort the tables:
239     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
240     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
241     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
242     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
243
244     # Put the tables together:
245     table = list()
246     table.extend(tbl_new)
247     table.extend(tbl_see)
248     table.extend(tbl_delta)
249
250     return table
251
252
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Builds a per-test comparison of reference vs compare throughput (with
    optional historical columns), computes the relative delta, sorts the
    rows and writes a CSV file plus a pretty-printed TXT file (with an
    optional footnote section).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables: optional history columns first,
    # then reference, compare and the delta column.
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Rec Rate"
        else:
            hdr_param = "Thput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    # tbl_dict maps the normalized test name to {"name", "ref-data",
    # "cmp-data"} (plus an optional "history" OrderedDict added below).
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        # NOTE(review): topo is (re)assigned per reference job and read
        # after the loops; if the reference data mapping is empty, topo is
        # unbound below -- confirm the spec always provides reference data.
        topo = "2n-skx" if "2n-skx" in job else ""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name: NIC prefix (from the parent suite)
                    # plus the test name without its last "-" component.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                _tpc_insert_data(target=tbl_dict[tst_name_mod]["ref-data"],
                                 src=tst_data,
                                 include_tests=table["include-tests"])

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
                                 src=tst_data,
                                 include_tests=table["include-tests"])

    # Optional replacement data set for the compare column: the first
    # replacement record encountered for a test clears the previously
    # collected cmp-data (create_new_list flag).
    # NOTE(review): the flag is global, not per test -- only the very
    # first replacement record resets a list; confirm this is intended.
    replacement = table["compare"].get("data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].iteritems():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if "across topologies" in table["title"].lower():
                        tst_name_mod = tst_name_mod.replace("2n1l-", "")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = "{0}".format("-".join(tst_data["name"].
                                                     split("-")[:-1]))
                        if "across testbeds" in table["title"].lower() or \
                                "across topologies" in table["title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {"name": name,
                                                  "ref-data": list(),
                                                  "cmp-data": list()}
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod]["cmp-data"] = list()

                    _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
                                     src=tst_data,
                                     include_tests=table["include-tests"])

    # Collect historical measurements per test and per history column
    # title; tests not already present in tbl_dict are skipped.
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        tst_name_mod = _tpc_modify_test_name(tst_name)
                        if "across topologies" in table["title"].lower():
                            tst_name_mod = tst_name_mod.replace("2n1l-", "")
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][item["title"
                                ]].append(tst_data["result"]["receive-rate"].
                                          avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "PDR"]["LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "NDR"]["LOWER"])
                            else:
                                continue
                        except (TypeError, KeyError):
                            pass

    # Build the output rows. Values are converted to Mpps (divide by 1e6)
    # and rounded to two decimals. The trailing cells are addressed
    # positionally: after both ref and cmp pairs are appended, item[-4] is
    # the reference mean and item[-2] is the compare mean.
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend(["Not tested", "Not tested"])
            else:
                item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend(["Not tested", "Not tested"])
        # No compare data -> no delta; no reference data -> a new test in
        # this release; 2n-skx dot1q tests get a footnote instead of a
        # delta (methodology change, see below).
        if item[-2] == "Not tested":
            pass
        elif item[-4] == "Not tested":
            item.append("New in CSIT-1908")
        elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
            item.append("See footnote [1]")
            footnote = True
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only complete rows with compare data; the length check also
        # drops rows where no delta/marker was appended (e.g. ref mean 0).
        if (len(item) == len(header)) and (item[-3] != "Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    txt_file_name = "{0}.txt".format(table["output-file"])
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # Append the footnote text to the TXT output only when at least one
    # row referenced it.
    if footnote:
        with open(txt_file_name, 'a') as txt_file:
            txt_file.writelines([
                "\nFootnotes:\n",
                "[1] CSIT-1908 changed test methodology of dot1q tests in "
                "2-node testbeds, dot1q encapsulation is now used on both "
                "links of SUT.\n",
                "    Previously dot1q was used only on a single link with the "
                "other link carrying untagged Ethernet frames. This changes "
                "results\n",
                "    in slightly lower throughput in CSIT-1908 for these "
                "tests. See release notes."
            ])
479
480
481 def table_performance_comparison_nic(table, input_data):
482     """Generate the table(s) with algorithm: table_performance_comparison
483     specified in the specification file.
484
485     :param table: Table to generate.
486     :param input_data: Data to process.
487     :type table: pandas.Series
488     :type input_data: InputData
489     """
490
491     logging.info("  Generating the table {0} ...".
492                  format(table.get("title", "")))
493
494     # Transform the data
495     logging.info("    Creating the data set for the {0} '{1}'.".
496                  format(table.get("type", ""), table.get("title", "")))
497     data = input_data.filter_data(table, continue_on_error=True)
498
499     # Prepare the header of the tables
500     try:
501         header = ["Test case", ]
502
503         if table["include-tests"] == "MRR":
504             hdr_param = "Rec Rate"
505         else:
506             hdr_param = "Thput"
507
508         history = table.get("history", None)
509         if history:
510             for item in history:
511                 header.extend(
512                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
513                      "{0} Stdev [Mpps]".format(item["title"])])
514         header.extend(
515             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
516              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
517              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
518              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
519              "Delta [%]"])
520         header_str = ",".join(header) + "\n"
521     except (AttributeError, KeyError) as err:
522         logging.error("The model is invalid, missing parameter: {0}".
523                       format(err))
524         return
525
526     # Prepare data to the table:
527     tbl_dict = dict()
528     for job, builds in table["reference"]["data"].items():
529         topo = "2n-skx" if "2n-skx" in job else ""
530         for build in builds:
531             for tst_name, tst_data in data[job][str(build)].iteritems():
532                 if table["reference"]["nic"] not in tst_data["tags"]:
533                     continue
534                 tst_name_mod = _tpc_modify_test_name(tst_name)
535                 if "across topologies" in table["title"].lower():
536                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
537                 if tbl_dict.get(tst_name_mod, None) is None:
538                     name = "{0}".format("-".join(tst_data["name"].
539                                                  split("-")[:-1]))
540                     if "across testbeds" in table["title"].lower() or \
541                             "across topologies" in table["title"].lower():
542                         name = _tpc_modify_displayed_test_name(name)
543                     tbl_dict[tst_name_mod] = {"name": name,
544                                               "ref-data": list(),
545                                               "cmp-data": list()}
546                 _tpc_insert_data(target=tbl_dict[tst_name_mod]["ref-data"],
547                                  src=tst_data,
548                                  include_tests=table["include-tests"])
549
550     for job, builds in table["compare"]["data"].items():
551         for build in builds:
552             for tst_name, tst_data in data[job][str(build)].iteritems():
553                 if table["compare"]["nic"] not in tst_data["tags"]:
554                     continue
555                 tst_name_mod = _tpc_modify_test_name(tst_name)
556                 if "across topologies" in table["title"].lower():
557                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
558                 if tbl_dict.get(tst_name_mod, None) is None:
559                     name = "{0}".format("-".join(tst_data["name"].
560                                                  split("-")[:-1]))
561                     if "across testbeds" in table["title"].lower() or \
562                             "across topologies" in table["title"].lower():
563                         name = _tpc_modify_displayed_test_name(name)
564                     tbl_dict[tst_name_mod] = {"name": name,
565                                               "ref-data": list(),
566                                               "cmp-data": list()}
567                 _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
568                                  src=tst_data,
569                                  include_tests=table["include-tests"])
570
571     replacement = table["compare"].get("data-replacement", None)
572     if replacement:
573         create_new_list = True
574         rpl_data = input_data.filter_data(
575             table, data=replacement, continue_on_error=True)
576         for job, builds in replacement.items():
577             for build in builds:
578                 for tst_name, tst_data in rpl_data[job][str(build)].iteritems():
579                     if table["compare"]["nic"] not in tst_data["tags"]:
580                         continue
581                     tst_name_mod = _tpc_modify_test_name(tst_name)
582                     if "across topologies" in table["title"].lower():
583                         tst_name_mod = tst_name_mod.replace("2n1l-", "")
584                     if tbl_dict.get(tst_name_mod, None) is None:
585                         name = "{0}".format("-".join(tst_data["name"].
586                                                      split("-")[:-1]))
587                         if "across testbeds" in table["title"].lower() or \
588                                 "across topologies" in table["title"].lower():
589                             name = _tpc_modify_displayed_test_name(name)
590                         tbl_dict[tst_name_mod] = {"name": name,
591                                                   "ref-data": list(),
592                                                   "cmp-data": list()}
593                     if create_new_list:
594                         create_new_list = False
595                         tbl_dict[tst_name_mod]["cmp-data"] = list()
596
597                     _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
598                                      src=tst_data,
599                                      include_tests=table["include-tests"])
600
601     if history:
602         for item in history:
603             for job, builds in item["data"].items():
604                 for build in builds:
605                     for tst_name, tst_data in data[job][str(build)].iteritems():
606                         if item["nic"] not in tst_data["tags"]:
607                             continue
608                         tst_name_mod = _tpc_modify_test_name(tst_name)
609                         if "across topologies" in table["title"].lower():
610                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
611                         if tbl_dict.get(tst_name_mod, None) is None:
612                             continue
613                         if tbl_dict[tst_name_mod].get("history", None) is None:
614                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
615                         if tbl_dict[tst_name_mod]["history"].get(item["title"],
616                                                              None) is None:
617                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
618                                 list()
619                         try:
620                             # TODO: Re-work when NDRPDRDISC tests are not used
621                             if table["include-tests"] == "MRR":
622                                 tbl_dict[tst_name_mod]["history"][item["title"
623                                 ]].append(tst_data["result"]["receive-rate"].
624                                           avg)
625                             elif table["include-tests"] == "PDR":
626                                 if tst_data["type"] == "PDR":
627                                     tbl_dict[tst_name_mod]["history"][
628                                         item["title"]].\
629                                         append(tst_data["throughput"]["value"])
630                                 elif tst_data["type"] == "NDRPDR":
631                                     tbl_dict[tst_name_mod]["history"][item[
632                                         "title"]].append(tst_data["throughput"][
633                                         "PDR"]["LOWER"])
634                             elif table["include-tests"] == "NDR":
635                                 if tst_data["type"] == "NDR":
636                                     tbl_dict[tst_name_mod]["history"][
637                                         item["title"]].\
638                                         append(tst_data["throughput"]["value"])
639                                 elif tst_data["type"] == "NDRPDR":
640                                     tbl_dict[tst_name_mod]["history"][item[
641                                         "title"]].append(tst_data["throughput"][
642                                         "NDR"]["LOWER"])
643                             else:
644                                 continue
645                         except (TypeError, KeyError):
646                             pass
647
648     tbl_lst = list()
649     footnote = False
650     for tst_name in tbl_dict.keys():
651         item = [tbl_dict[tst_name]["name"], ]
652         if history:
653             if tbl_dict[tst_name].get("history", None) is not None:
654                 for hist_data in tbl_dict[tst_name]["history"].values():
655                     if hist_data:
656                         item.append(round(mean(hist_data) / 1000000, 2))
657                         item.append(round(stdev(hist_data) / 1000000, 2))
658                     else:
659                         item.extend(["Not tested", "Not tested"])
660             else:
661                 item.extend(["Not tested", "Not tested"])
662         data_t = tbl_dict[tst_name]["ref-data"]
663         if data_t:
664             item.append(round(mean(data_t) / 1000000, 2))
665             item.append(round(stdev(data_t) / 1000000, 2))
666         else:
667             item.extend(["Not tested", "Not tested"])
668         data_t = tbl_dict[tst_name]["cmp-data"]
669         if data_t:
670             item.append(round(mean(data_t) / 1000000, 2))
671             item.append(round(stdev(data_t) / 1000000, 2))
672         else:
673             item.extend(["Not tested", "Not tested"])
674         if item[-2] == "Not tested":
675             pass
676         elif item[-4] == "Not tested":
677             item.append("New in CSIT-1908")
678         elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
679             item.append("See footnote [1]")
680             footnote = True
681         elif item[-4] != 0:
682             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
683         if (len(item) == len(header)) and (item[-3] != "Not tested"):
684             tbl_lst.append(item)
685
686     tbl_lst = _tpc_sort_table(tbl_lst)
687
688     # Generate csv tables:
689     csv_file = "{0}.csv".format(table["output-file"])
690     with open(csv_file, "w") as file_handler:
691         file_handler.write(header_str)
692         for test in tbl_lst:
693             file_handler.write(",".join([str(item) for item in test]) + "\n")
694
695     txt_file_name = "{0}.txt".format(table["output-file"])
696     convert_csv_to_pretty_txt(csv_file, txt_file_name)
697
698     if footnote:
699         with open(txt_file_name, 'a') as txt_file:
700             txt_file.writelines([
701                 "\nFootnotes:\n",
702                 "[1] CSIT-1908 changed test methodology of dot1q tests in "
703                 "2-node testbeds, dot1q encapsulation is now used on both "
704                 "links of SUT.\n",
705                 "    Previously dot1q was used only on a single link with the "
706                 "other link carrying untagged Ethernet frames. This changes "
707                 "results\n",
708                 "    in slightly lower throughput in CSIT-1908 for these "
709                 "tests. See release notes."
710             ])
711
712
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Compares throughput (or receive rate for MRR) of the same tests run on
    two different NICs and writes the result as a CSV file and a pretty-txt
    rendering of it.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        # MRR tests report a receive rate, the others a throughput.
        if table["include-tests"] == "MRR":
            hdr_param = "Rec Rate"
        else:
            hdr_param = "Thput"

        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Normalise the test name: drop the rate-type suffix and
                # collapse the thread/core combination to the core count, so
                # the same test on both NICs maps to one key.
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                # Strip the NIC designation as well; the NIC is the variable
                # being compared.
                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = "-".join(tst_data["name"].split("-")[:-1])
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    if table["include-tests"] == "MRR":
                        result = tst_data["result"]["receive-rate"].avg
                    elif table["include-tests"] == "PDR":
                        result = tst_data["throughput"]["PDR"]["LOWER"]
                    elif table["include-tests"] == "NDR":
                        result = tst_data["throughput"]["NDR"]["LOWER"]
                    else:
                        result = None

                    # Route the sample to the reference or compare column
                    # based on which NIC tag the test carries.
                    if result:
                        if table["reference"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["ref-data"].append(result)
                        elif table["compare"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug("No data for {0}".format(tst_name))
                    logging.debug(repr(err))
                    # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            # Convert pps to Mpps, rounded to two decimal places.
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # Delta is computed only when both means exist and the reference
        # mean is non-zero (avoids division by zero).
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Rows missing any column (no data or no delta) are dropped.
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
821
822
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Pairs each SOAK test result with the corresponding NDR/PDR/MRR result
    and writes a comparison table (means, standard deviations, relative
    delta and its stdev) as CSV and pretty-txt files.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            "Test case",
            "{0} Thput [Mpps]".format(table["reference"]["title"]),
            "{0} Stdev [Mpps]".format(table["reference"]["title"]),
            "{0} Thput [Mpps]".format(table["compare"]["title"]),
            "{0} Stdev [Mpps]".format(table["compare"]["title"]),
            "Delta [%]", "Stdev of delta [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_data["type"] == "SOAK":
                    tst_name_mod = tst_name.replace("-soak", "")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data["parent"])
                        nic = groups.group(0) if groups else ""
                        name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                              split("-")[:-1]))
                        tbl_dict[tst_name_mod] = {
                            "name": name,
                            "ref-data": list(),
                            "cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod]["cmp-data"].append(
                            tst_data["throughput"]["LOWER"])
                    except (KeyError, TypeError):
                        # No soak throughput in output.xml for this test.
                        pass
    # Use a set: membership is tested for every test of every build below,
    # and set lookup is O(1) vs O(n) for the list returned by keys().
    tests_lst = set(tbl_dict.keys())

    # Add corresponding NDR test results:
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdr", "").\
                    replace("-mrr", "")
                if tst_name_mod in tests_lst:
                    try:
                        if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
                            if table["include-tests"] == "MRR":
                                result = tst_data["result"]["receive-rate"].avg
                            elif table["include-tests"] == "PDR":
                                result = tst_data["throughput"]["PDR"]["LOWER"]
                            elif table["include-tests"] == "NDR":
                                result = tst_data["throughput"]["NDR"]["LOWER"]
                            else:
                                result = None
                            if result is not None:
                                tbl_dict[tst_name_mod]["ref-data"].append(
                                    result)
                    except (KeyError, TypeError):
                        continue

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        data_r = tbl_dict[tst_name]["ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            # Convert pps to Mpps for presentation.
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name]["cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # Only rows with both sides present (and non-zero means) get a
        # delta and make it into the table.
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
944
945
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    Builds the trending dashboard: per test, the latest trend value, the
    short- and long-term relative change, and the number of regressions
    and progressions within the short-term window. Output is a CSV file
    and a pretty-txt rendering of it.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        # A trend needs at least two samples.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # All values in the long window were NaN (or the slice was
            # empty); max() raises ValueError on an empty sequence.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip rows without a complete set of values. (The previous
            # separate "both changes are NaN" test was redundant: it is
            # fully covered by this disjunction.)
            if (isnan(last_avg) or
                    isnan(rel_change_last) or
                    isnan(rel_change_long)):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Stable multi-key ordering: regressions desc, then progressions desc,
    # then short-term change asc.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1064
1065
1066 def _generate_url(base, testbed, test_name):
1067     """Generate URL to a trending plot from the name of the test case.
1068
1069     :param base: The base part of URL common to all test cases.
1070     :param testbed: The testbed used for testing.
1071     :param test_name: The name of the test case.
1072     :type base: str
1073     :type testbed: str
1074     :type test_name: str
1075     :returns: The URL to the plot with the trending data for the given test
1076         case.
1077     :rtype str
1078     """
1079
1080     url = base
1081     file_name = ""
1082     anchor = ".html#"
1083     feature = ""
1084
1085     if "lbdpdk" in test_name or "lbvpp" in test_name:
1086         file_name = "link_bonding"
1087
1088     elif "114b" in test_name and "vhost" in test_name:
1089         file_name = "vts"
1090
1091     elif "testpmd" in test_name or "l3fwd" in test_name:
1092         file_name = "dpdk"
1093
1094     elif "memif" in test_name:
1095         file_name = "container_memif"
1096         feature = "-base"
1097
1098     elif "srv6" in test_name:
1099         file_name = "srv6"
1100
1101     elif "vhost" in test_name:
1102         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
1103             file_name = "vm_vhost_l2"
1104             if "114b" in test_name:
1105                 feature = ""
1106             elif "l2xcbase" in test_name and "x520" in test_name:
1107                 feature = "-base-l2xc"
1108             elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
1109                 feature = "-base-l2bd"
1110             else:
1111                 feature = "-base"
1112         elif "ip4base" in test_name:
1113             file_name = "vm_vhost_ip4"
1114             feature = "-base"
1115
1116     elif "ipsecbasetnlsw" in test_name:
1117         file_name = "ipsecsw"
1118         feature = "-base-scale"
1119
1120     elif "ipsec" in test_name:
1121         file_name = "ipsec"
1122         feature = "-base-scale"
1123         if "hw-" in test_name:
1124             file_name = "ipsechw"
1125         elif "sw-" in test_name:
1126             file_name = "ipsecsw"
1127
1128     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
1129         file_name = "ip4_tunnels"
1130         feature = "-base"
1131
1132     elif "ip4base" in test_name or "ip4scale" in test_name:
1133         file_name = "ip4"
1134         if "xl710" in test_name:
1135             feature = "-base-scale-features"
1136         elif "iacl" in test_name:
1137             feature = "-features-iacl"
1138         elif "oacl" in test_name:
1139             feature = "-features-oacl"
1140         elif "snat" in test_name or "cop" in test_name:
1141             feature = "-features"
1142         else:
1143             feature = "-base-scale"
1144
1145     elif "ip6base" in test_name or "ip6scale" in test_name:
1146         file_name = "ip6"
1147         feature = "-base-scale"
1148
1149     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
1150             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
1151             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
1152         file_name = "l2"
1153         if "macip" in test_name:
1154             feature = "-features-macip"
1155         elif "iacl" in test_name:
1156             feature = "-features-iacl"
1157         elif "oacl" in test_name:
1158             feature = "-features-oacl"
1159         else:
1160             feature = "-base-scale"
1161
1162     if "x520" in test_name:
1163         nic = "x520-"
1164     elif "x710" in test_name:
1165         nic = "x710-"
1166     elif "xl710" in test_name:
1167         nic = "xl710-"
1168     elif "xxv710" in test_name:
1169         nic = "xxv710-"
1170     elif "vic1227" in test_name:
1171         nic = "vic1227-"
1172     elif "vic1385" in test_name:
1173         nic = "vic1385-"
1174     else:
1175         nic = ""
1176     anchor += nic
1177
1178     if "64b" in test_name:
1179         framesize = "64b"
1180     elif "78b" in test_name:
1181         framesize = "78b"
1182     elif "imix" in test_name:
1183         framesize = "imix"
1184     elif "9000b" in test_name:
1185         framesize = "9000b"
1186     elif "1518b" in test_name:
1187         framesize = "1518b"
1188     elif "114b" in test_name:
1189         framesize = "114b"
1190     else:
1191         framesize = ""
1192     anchor += framesize + '-'
1193
1194     if "1t1c" in test_name:
1195         anchor += "1t1c"
1196     elif "2t2c" in test_name:
1197         anchor += "2t2c"
1198     elif "4t4c" in test_name:
1199         anchor += "4t4c"
1200     elif "2t1c" in test_name:
1201         anchor += "2t1c"
1202     elif "4t2c" in test_name:
1203         anchor += "4t2c"
1204     elif "8t4c" in test_name:
1205         anchor += "8t4c"
1206
1207     return url + file_name + '-' + testbed + '-' + nic + framesize + \
1208         feature.replace("-int", "").replace("-tnl", "") + anchor + feature
1209
1210
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the dashboard CSV produced earlier and renders it as an HTML
    table (embedded in an rST ".. raw:: html" block), coloring rows by
    regression/progression counts and linking test names to their trending
    plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Guard against an empty input file: csv_lst[0] below would raise
    # IndexError.
    if not csv_lst:
        logging.warning("No data in the file '{0}'.".
                        format(table["input-file"]))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows:
    # Each state has two shades which alternate per row.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 hold the regression / progression counts.
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        else:
            color = "normal"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name:
            if c_idx == 0:
                # The first column is the test name; link it to its
                # trending plot.
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
1287
1288
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each specified build, writes the build number, the tested version
    and the names of all tests which failed in that build, one item per
    line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        # logging.warn is a deprecated alias of logging.warning.
        logging.warning("    No data for the {0} '{1}'.".
                        format(table.get("type", ""), table.get("title", "")))
        return

    tbl_list = list()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get("version", "")
            except KeyError:
                logging.error("Data for {job}: {build} is not present.".
                              format(job=job, build=build))
                return
            tbl_list.append(build)
            tbl_list.append(version)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_data["status"] != "FAIL":
                    continue
                # Tests whose parent carries no NIC designation are skipped.
                groups = re.search(REGEX_NIC, tst_data["parent"])
                if not groups:
                    continue
                nic = groups.group(0)
                tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        for test in tbl_list:
            file_handler.write(test + '\n')
1338
1339
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Counts test failures within a configurable time window (default seven
    days) and writes a CSV table (plus a pretty-txt rendering) with the
    failure count and the time / VPP build / CSIT build of the last
    failure.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification

    # Only results generated within the last "window" days are counted.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get("window", 7)))

    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    generated = input_data.metadata(job, build).\
                        get("generated", "")
                    if not generated:
                        continue
                    then = dt.strptime(generated, "%Y%m%d %H:%M")
                    # Keep (status, timestamp, version, build) per build,
                    # but only for builds inside the time window.
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name]["data"][build] = (
                            tst_data["status"],
                            generated,
                            input_data.metadata(job, build).get("version", ""),
                            build)
                except (TypeError, KeyError) as err:
                    logging.warning("tst_name: {} - err: {}".
                                    format(tst_name, repr(err)))

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        # Iterating the OrderedDict in insertion order means the last FAIL
        # seen determines the "last failure" columns.
        for val in tst_data["data"].values():
            if val[0] == "FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    # Order by last-failure time (desc), then group by failure count (desc).
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1436
1437
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    The table is read from the CSV file specified by table["input-file"] and
    written as an HTML table (wrapped in a reST "raw" directive) to
    table["output-file"]. The first CSV row is used as the table header; the
    first column of each data row is turned into a link to the trending page
    for the given testbed.

    The function returns early (with a logged message) when: the testbed is
    not defined, the input file is not defined or not processable, the input
    file is empty, or the output file is not defined.

    :param table: Table to generate.
    :param input_data: Data to process. Not used by this algorithm; present
        for a signature consistent with the other table generators.
    :type table: pandas.Series
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = list(csv_content)
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # An empty file has no header row; csv_lst[0] below would raise
    # IndexError, so bail out with a warning instead.
    if not csv_lst:
        logging.warning("The input file '{0}' is empty.".
                        format(table["input-file"]))
        return

    # Table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header (first CSV row):
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows (remaining CSV rows), with alternating background colors:
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name (first column): link to the trending page.
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return