Report: Comparison tables
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
28
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30     convert_csv_to_pretty_txt, relative_change_stdev
31
32
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
34
35
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table item in the specification names its generator algorithm;
    the matching function in this module is looked up by name and invoked.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for tbl in spec.tables:
        alg_name = tbl["algorithm"]
        try:
            # Resolve the generator function named in the specification and
            # run it; an unknown name raises NameError which is reported
            # below without aborting the remaining tables.
            table_generator = eval(alg_name)
            table_generator(tbl, data)
        except NameError as err:
            logging.error(
                "Probably algorithm '{alg}' is not defined: {err}".format(
                    alg=alg_name, err=repr(err)))
    logging.info("Done.")
53
54
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    One CSV file is written per suite found in the first build of the first
    job listed in table["data"].

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables; double any quote characters so the
    # value survives CSV quoting.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification. Only the first build of the first job is used.
    # FIX: dict.keys() is not subscriptable on Python 3; list(d) yields the
    # same first key on Python 2 as the original keys()[0].
    job = list(table["data"])[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    # FIX: items() replaces Python-2-only iteritems(); iteration order and
    # content are identical.
    for suite_longname, suite in suites.items():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("conf-history",
                                                            "show-run"):
                            # Drop the first " |br| " separator, strip the
                            # trailing separator and wrap the text in
                            # |prein| / |preout| rST markers.
                            # FIX: str.replace(old, new, 1) replaces the
                            # deprecated string.replace(..., maxreplace=1)
                            # free function (removed in Python 3).
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        # The requested column is missing for this test item.
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
122
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Data from all jobs/builds is merged first; one CSV file is then written
    per merged suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; double any quote characters so the
    # value survives CSV quoting.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # FIX: items() replaces Python-2-only iteritems(); iteration order and
    # content are identical.
    for _, suite in suites.items():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        # FIX: str.replace replaces the deprecated
                        # string.replace free function (removed in Python 3);
                        # behavior is identical.
                        col_data = col_data.replace("No Data",
                                                    "Not Captured     ")
                        if column["data"].split(" ")[1] in ("conf-history",
                                                            "show-run"):
                            # Drop the first " |br| " separator, strip the
                            # trailing separator and wrap the text in
                            # |prein| / |preout| rST markers.
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        # The requested column is missing for this test item.
                        row_lst.append('"Not captured"')
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
188
189
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Collects reference, compare and optional historical throughput samples
    per (normalized) test name, then writes a CSV and a pretty-txt table of
    mean/stdev [Mpps] plus the relative delta [%].

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Rec Rate"
        else:
            hdr_param = "Thput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    # FIX: initialize topo before the loop; it is read when building the
    # table rows below and would be undefined (NameError) if the reference
    # data set contained no jobs.
    topo = ""
    for job, builds in table["reference"]["data"].items():
        topo = "2n-skx" if "2n-skx" in job else ""
        for build in builds:
            # FIX: items() replaces Python-2-only iteritems(); identical
            # behavior under Python 2.
            for tst_name, tst_data in data[job][str(build)].items():
                # Normalize the test name: drop the rate-search suffix and
                # collapse thread/core variants to the core count only.
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["ref-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                    replace("-ndrdisc", "").replace("-pdr", ""). \
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["cmp-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except (KeyError, TypeError):
                    pass
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].items():
                        tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                            replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                            replace("-ndrdisc", "").replace("-pdr", ""). \
                            replace("-ndr", "").\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                        if "across topologies" in table["title"].lower():
                            tst_name_mod = tst_name_mod.replace("2n1l-", "")
                        # Historical samples are attached only to tests that
                        # exist in the reference/compare sets.
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][item["title"
                                ]].append(tst_data["result"]["receive-rate"].
                                          avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "PDR"]["LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "NDR"]["LOWER"])
                            else:
                                continue
                        except (TypeError, KeyError):
                            pass

    # Build the table rows: mean and stdev scaled to Mpps, then the delta.
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend(["Not tested", "Not tested"])
            else:
                item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend(["Not tested", "Not tested"])
        # item[-4] is the reference mean, item[-2] the compare mean; the
        # delta is computed only when both exist and the reference is
        # non-zero (avoids division by zero in relative_change).
        if item[-4] != "Not tested" and item[-2] != "Not tested" and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        elif item[-4] == "Not tested":
            item.append("New in CSIT-1908")
        elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
            item.append("See footnote [1]")
            footnote = True
        if (len(item) == len(header)) and (item[-3] != "Not tested"):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    # NOTE(review): the key may mix int deltas with str labels ("New in
    # CSIT-1908"); this sorts under Python 2 but would raise TypeError on
    # Python 3 — revisit when porting.
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    txt_file_name = "{0}.txt".format(table["output-file"])
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    if footnote:
        with open(txt_file_name, 'a') as txt_file:
            # FIX: writelines() does not add line separators; without the
            # explicit "\n" all footnote lines were glued onto one line.
            txt_file.writelines([
                "Footnotes:\n",
                "[1] CSIT-1908 changed test methodology of dot1q tests in "
                "2n-skx testbeds, dot1q encapsulation is now used on both "
                "links of SUT.\n",
                "    Previously dot1q was used only on a single link with the "
                "other link carrying untagged Ethernet frames. This change "
                "results\n",
                "    in slightly lower throughput in CSIT-1908 for these "
                "tests. See release notes.\n"
            ])
451
452 def table_performance_comparison_nic(table, input_data):
453     """Generate the table(s) with algorithm: table_performance_comparison
454     specified in the specification file.
455
456     :param table: Table to generate.
457     :param input_data: Data to process.
458     :type table: pandas.Series
459     :type input_data: InputData
460     """
461
462     logging.info("  Generating the table {0} ...".
463                  format(table.get("title", "")))
464
465     # Transform the data
466     logging.info("    Creating the data set for the {0} '{1}'.".
467                  format(table.get("type", ""), table.get("title", "")))
468     data = input_data.filter_data(table, continue_on_error=True)
469
470     # Prepare the header of the tables
471     try:
472         header = ["Test case", ]
473
474         if table["include-tests"] == "MRR":
475             hdr_param = "Rec Rate"
476         else:
477             hdr_param = "Thput"
478
479         history = table.get("history", None)
480         if history:
481             for item in history:
482                 header.extend(
483                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
484                      "{0} Stdev [Mpps]".format(item["title"])])
485         header.extend(
486             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
487              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
488              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
489              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
490              "Delta [%]"])
491         header_str = ",".join(header) + "\n"
492     except (AttributeError, KeyError) as err:
493         logging.error("The model is invalid, missing parameter: {0}".
494                       format(err))
495         return
496
497     # Prepare data to the table:
498     tbl_dict = dict()
499     for job, builds in table["reference"]["data"].items():
500         topo = "2n-skx" if "2n-skx" in job else ""
501         for build in builds:
502             for tst_name, tst_data in data[job][str(build)].iteritems():
503                 if table["reference"]["nic"] not in tst_data["tags"]:
504                     continue
505                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
506                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
507                     replace("-ndrdisc", "").replace("-pdr", "").\
508                     replace("-ndr", "").\
509                     replace("1t1c", "1c").replace("2t1c", "1c").\
510                     replace("2t2c", "2c").replace("4t2c", "2c").\
511                     replace("4t4c", "4c").replace("8t4c", "4c")
512                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
513                 if "across topologies" in table["title"].lower():
514                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
515                 if tbl_dict.get(tst_name_mod, None) is None:
516                     name = "{0}".format("-".join(tst_data["name"].
517                                                  split("-")[:-1]))
518                     if "across testbeds" in table["title"].lower() or \
519                             "across topologies" in table["title"].lower():
520                         name = name.\
521                             replace("1t1c", "1c").replace("2t1c", "1c").\
522                             replace("2t2c", "2c").replace("4t2c", "2c").\
523                             replace("4t4c", "4c").replace("8t4c", "4c")
524                     tbl_dict[tst_name_mod] = {"name": name,
525                                               "ref-data": list(),
526                                               "cmp-data": list()}
527                 try:
528                     # TODO: Re-work when NDRPDRDISC tests are not used
529                     if table["include-tests"] == "MRR":
530                         tbl_dict[tst_name_mod]["ref-data"]. \
531                             append(tst_data["result"]["receive-rate"].avg)
532                     elif table["include-tests"] == "PDR":
533                         if tst_data["type"] == "PDR":
534                             tbl_dict[tst_name_mod]["ref-data"]. \
535                                 append(tst_data["throughput"]["value"])
536                         elif tst_data["type"] == "NDRPDR":
537                             tbl_dict[tst_name_mod]["ref-data"].append(
538                                 tst_data["throughput"]["PDR"]["LOWER"])
539                     elif table["include-tests"] == "NDR":
540                         if tst_data["type"] == "NDR":
541                             tbl_dict[tst_name_mod]["ref-data"]. \
542                                 append(tst_data["throughput"]["value"])
543                         elif tst_data["type"] == "NDRPDR":
544                             tbl_dict[tst_name_mod]["ref-data"].append(
545                                 tst_data["throughput"]["NDR"]["LOWER"])
546                     else:
547                         continue
548                 except TypeError:
549                     pass  # No data in output.xml for this test
550
551     for job, builds in table["compare"]["data"].items():
552         for build in builds:
553             for tst_name, tst_data in data[job][str(build)].iteritems():
554                 if table["compare"]["nic"] not in tst_data["tags"]:
555                     continue
556                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
557                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
558                     replace("-ndrdisc", "").replace("-pdr", ""). \
559                     replace("-ndr", "").\
560                     replace("1t1c", "1c").replace("2t1c", "1c").\
561                     replace("2t2c", "2c").replace("4t2c", "2c").\
562                     replace("4t4c", "4c").replace("8t4c", "4c")
563                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
564                 if "across topologies" in table["title"].lower():
565                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
566                 if tbl_dict.get(tst_name_mod, None) is None:
567                     name = "{0}".format("-".join(tst_data["name"].
568                                                  split("-")[:-1]))
569                     if "across testbeds" in table["title"].lower() or \
570                             "across topologies" in table["title"].lower():
571                         name = name.\
572                             replace("1t1c", "1c").replace("2t1c", "1c").\
573                             replace("2t2c", "2c").replace("4t2c", "2c").\
574                             replace("4t4c", "4c").replace("8t4c", "4c")
575                     tbl_dict[tst_name_mod] = {"name": name,
576                                               "ref-data": list(),
577                                               "cmp-data": list()}
578                 try:
579                     # TODO: Re-work when NDRPDRDISC tests are not used
580                     if table["include-tests"] == "MRR":
581                         tbl_dict[tst_name_mod]["cmp-data"]. \
582                             append(tst_data["result"]["receive-rate"].avg)
583                     elif table["include-tests"] == "PDR":
584                         if tst_data["type"] == "PDR":
585                             tbl_dict[tst_name_mod]["cmp-data"]. \
586                                 append(tst_data["throughput"]["value"])
587                         elif tst_data["type"] == "NDRPDR":
588                             tbl_dict[tst_name_mod]["cmp-data"].append(
589                                 tst_data["throughput"]["PDR"]["LOWER"])
590                     elif table["include-tests"] == "NDR":
591                         if tst_data["type"] == "NDR":
592                             tbl_dict[tst_name_mod]["cmp-data"]. \
593                                 append(tst_data["throughput"]["value"])
594                         elif tst_data["type"] == "NDRPDR":
595                             tbl_dict[tst_name_mod]["cmp-data"].append(
596                                 tst_data["throughput"]["NDR"]["LOWER"])
597                     else:
598                         continue
599                 except (KeyError, TypeError):
600                     pass
601
602     if history:
603         for item in history:
604             for job, builds in item["data"].items():
605                 for build in builds:
606                     for tst_name, tst_data in data[job][str(build)].iteritems():
607                         if item["nic"] not in tst_data["tags"]:
608                             continue
609                         tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
610                             replace("-ndrpdr", "").replace("-pdrdisc", ""). \
611                             replace("-ndrdisc", "").replace("-pdr", ""). \
612                             replace("-ndr", "").\
613                             replace("1t1c", "1c").replace("2t1c", "1c").\
614                             replace("2t2c", "2c").replace("4t2c", "2c").\
615                             replace("4t4c", "4c").replace("8t4c", "4c")
616                         tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
617                         if "across topologies" in table["title"].lower():
618                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
619                         if tbl_dict.get(tst_name_mod, None) is None:
620                             continue
621                         if tbl_dict[tst_name_mod].get("history", None) is None:
622                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
623                         if tbl_dict[tst_name_mod]["history"].get(item["title"],
624                                                              None) is None:
625                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
626                                 list()
627                         try:
628                             # TODO: Re-work when NDRPDRDISC tests are not used
629                             if table["include-tests"] == "MRR":
630                                 tbl_dict[tst_name_mod]["history"][item["title"
631                                 ]].append(tst_data["result"]["receive-rate"].
632                                           avg)
633                             elif table["include-tests"] == "PDR":
634                                 if tst_data["type"] == "PDR":
635                                     tbl_dict[tst_name_mod]["history"][
636                                         item["title"]].\
637                                         append(tst_data["throughput"]["value"])
638                                 elif tst_data["type"] == "NDRPDR":
639                                     tbl_dict[tst_name_mod]["history"][item[
640                                         "title"]].append(tst_data["throughput"][
641                                         "PDR"]["LOWER"])
642                             elif table["include-tests"] == "NDR":
643                                 if tst_data["type"] == "NDR":
644                                     tbl_dict[tst_name_mod]["history"][
645                                         item["title"]].\
646                                         append(tst_data["throughput"]["value"])
647                                 elif tst_data["type"] == "NDRPDR":
648                                     tbl_dict[tst_name_mod]["history"][item[
649                                         "title"]].append(tst_data["throughput"][
650                                         "NDR"]["LOWER"])
651                             else:
652                                 continue
653                         except (TypeError, KeyError):
654                             pass
655
656     tbl_lst = list()
657     footnote = False
658     for tst_name in tbl_dict.keys():
659         item = [tbl_dict[tst_name]["name"], ]
660         if history:
661             if tbl_dict[tst_name].get("history", None) is not None:
662                 for hist_data in tbl_dict[tst_name]["history"].values():
663                     if hist_data:
664                         item.append(round(mean(hist_data) / 1000000, 2))
665                         item.append(round(stdev(hist_data) / 1000000, 2))
666                     else:
667                         item.extend(["Not tested", "Not tested"])
668             else:
669                 item.extend(["Not tested", "Not tested"])
670         data_t = tbl_dict[tst_name]["ref-data"]
671         if data_t:
672             item.append(round(mean(data_t) / 1000000, 2))
673             item.append(round(stdev(data_t) / 1000000, 2))
674         else:
675             item.extend(["Not tested", "Not tested"])
676         data_t = tbl_dict[tst_name]["cmp-data"]
677         if data_t:
678             item.append(round(mean(data_t) / 1000000, 2))
679             item.append(round(stdev(data_t) / 1000000, 2))
680         else:
681             item.extend(["Not tested", "Not tested"])
682         if item[-4] != "Not tested" and item[-2] != "Not tested" and item[-4] != 0:
683             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
684         elif item[-4] == "Not tested":
685             item.append("New in CSIT-1908")
686         elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
687             item.append("See footnote [1]")
688             footnote = True
689         if (len(item) == len(header)) and (item[-3] != "Not tested"):
690             tbl_lst.append(item)
691
692     # Sort the table according to the relative change
693     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
694
695     # Generate csv tables:
696     csv_file = "{0}.csv".format(table["output-file"])
697     with open(csv_file, "w") as file_handler:
698         file_handler.write(header_str)
699         for test in tbl_lst:
700             file_handler.write(",".join([str(item) for item in test]) + "\n")
701
702     txt_file_name = "{0}.txt".format(table["output-file"])
703     convert_csv_to_pretty_txt(csv_file, txt_file_name)
704
705     if footnote:
706         with open(txt_file_name, 'a') as txt_file:
707             txt_file.writelines([
708                 "Footnotes:",
709                 "[1] CSIT-1908 changed test methodology of dot1q tests in "
710                 "2n-skx testbeds, dot1q encapsulation is now used on both "
711                 "links of SUT.",
712                 "    Previously dot1q was used only on a single link with the "
713                 "other link carrying untagged Ethernet frames. This change "
714                 "results",
715                 "    in slightly lower throughput in CSIT-1908 for these "
716                 "tests. See release notes."
717             ])
718
719
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Builds a CSV (and pretty txt) table comparing throughput of the same
    tests run on a reference NIC vs a compared NIC.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        header = [
            "Test case",
            "{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
            "{0} Stdev [Mpps]".format(table["reference"]["title"]),
            "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
            "{0} Stdev [Mpps]".format(table["compare"]["title"]),
            "Delta [%]"
        ]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Substitutions normalizing test names: drop the test-type suffix and
    # collapse thread/core combinations to the core count only.
    substitutions = (
        ("-ndrpdrdisc", ""), ("-ndrpdr", ""), ("-pdrdisc", ""),
        ("-ndrdisc", ""), ("-pdr", ""), ("-ndr", ""),
        ("1t1c", "1c"), ("2t1c", "1c"),
        ("2t2c", "2c"), ("4t2c", "2c"),
        ("4t4c", "4c"), ("8t4c", "4c"))

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name
                for old, new in substitutions:
                    tst_name_mod = tst_name_mod.replace(old, new)
                # Drop the NIC part so runs on both NICs share one key.
                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                if tst_name_mod not in tbl_dict:
                    name = "-".join(tst_data["name"].split("-")[:-1])
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    mode = table["include-tests"]
                    if mode == "MRR":
                        result = tst_data["result"]["receive-rate"].avg
                    elif mode == "PDR":
                        result = tst_data["throughput"]["PDR"]["LOWER"]
                    elif mode == "NDR":
                        result = tst_data["throughput"]["NDR"]["LOWER"]
                    else:
                        result = None

                    if result:
                        # The NIC tag decides which column the sample
                        # contributes to.
                        if table["reference"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["ref-data"].append(result)
                        elif table["compare"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug("No data for {0}".format(tst_name))
                    logging.debug(repr(err))
                    # No data in output.xml for this test

    tbl_lst = list()
    for entry in tbl_dict.values():
        row = [entry["name"], ]
        # Mean and stdev (in Mpps) for reference, then compared samples.
        for samples in (entry["ref-data"], entry["cmp-data"]):
            if samples:
                row.append(round(mean(samples) / 1000000, 2))
                row.append(round(stdev(samples) / 1000000, 2))
            else:
                row.extend([None, None])
        if row[-4] is not None and row[-2] is not None and row[-4] != 0:
            row.append(int(relative_change(float(row[-4]), float(row[-2]))))
        # Keep only complete rows (both data sets present, delta computed).
        if len(row) == len(header):
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as f_out:
        f_out.write(header_str)
        f_out.writelines(
            ",".join(str(col) for col in row) + "\n" for row in tbl_lst)

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
828
829
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Pairs SOAK test throughput with the corresponding NDR/PDR/MRR result
    and writes a CSV (and pretty txt) comparison table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        ref_title = table["reference"]["title"]
        cmp_title = table["compare"]["title"]
        header = [
            "Test case",
            "{0} Throughput [Mpps]".format(ref_title),
            "{0} Stdev [Mpps]".format(ref_title),
            "{0} Throughput [Mpps]".format(cmp_title),
            "{0} Stdev [Mpps]".format(cmp_title),
            "Delta [%]", "Stdev of delta [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_data["type"] != "SOAK":
                    continue
                tst_name_mod = tst_name.replace("-soak", "")
                if tst_name_mod not in tbl_dict:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    # Prefix the test name with its NIC, drop the last
                    # (test-type) name component.
                    name = "{0}-{1}".format(
                        nic, "-".join(tst_data["name"].split("-")[:-1]))
                    tbl_dict[tst_name_mod] = {
                        "name": name,
                        "ref-data": list(),
                        "cmp-data": list()
                    }
                try:
                    tbl_dict[tst_name_mod]["cmp-data"].append(
                        tst_data["throughput"]["LOWER"])
                except (KeyError, TypeError):
                    pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdr", "").\
                    replace("-mrr", "")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data["type"] not in ("NDRPDR", "MRR", "BMRR"):
                        continue
                    mode = table["include-tests"]
                    if mode == "MRR":
                        result = tst_data["result"]["receive-rate"].avg
                    elif mode == "PDR":
                        result = tst_data["throughput"]["PDR"]["LOWER"]
                    elif mode == "NDR":
                        result = tst_data["throughput"]["NDR"]["LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod]["ref-data"].append(result)
                except (KeyError, TypeError):
                    continue

    tbl_lst = list()
    for entry in tbl_dict.values():
        row = [entry["name"], ]
        stats = list()
        # Mean and stdev (in Mpps) for reference, then compared samples.
        for samples in (entry["ref-data"], entry["cmp-data"]):
            if samples:
                avg = mean(samples)
                dev = stdev(samples)
                row.append(round(avg / 1000000, 2))
                row.append(round(dev / 1000000, 2))
            else:
                avg = None
                dev = None
                row.extend([None, None])
            stats.append((avg, dev))
        (ref_avg, ref_dev), (cmp_avg, cmp_dev) = stats
        if ref_avg and cmp_avg:
            delta, d_stdev = relative_change_stdev(
                ref_avg, cmp_avg, ref_dev, cmp_dev)
            row.append(round(delta, 2))
            row.append(round(d_stdev, 2))
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as f_out:
        f_out.write(header_str)
        f_out.writelines(
            ",".join(str(col) for col in row) + "\n" for row in tbl_lst)

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
951
952
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    Produces a CSV (and pretty txt) dashboard with the trend value and
    short/long-term relative changes per test, sorted so that tests with
    the most regressions (then progressions) come first.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        # NIC cannot be determined from the parent suite name,
                        # skip this test.
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        if len(data_t) < 2:
            # At least two samples are needed to detect any change.
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        # Maximum of the trend averages inside the long-term window
        # (excluding the short-term window); NaN when that slice holds no
        # valid (non-NaN) samples.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last average vs the average at the start of the
        # short-term window.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        # Long-term change: last average vs the long-term maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip rows with incomplete trending information.  (The former
            # extra "both relative changes are NaN" pre-check was dead code:
            # it was fully subsumed by this condition, so it was removed.)
            if (isnan(last_avg) or
                    isnan(rel_change_last) or
                    isnan(rel_change_long)):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Stable re-bucketing: most regressions first, then most progressions,
    # then the biggest short-term drop within each bucket.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1071
1072
1073 def _generate_url(base, testbed, test_name):
1074     """Generate URL to a trending plot from the name of the test case.
1075
1076     :param base: The base part of URL common to all test cases.
1077     :param testbed: The testbed used for testing.
1078     :param test_name: The name of the test case.
1079     :type base: str
1080     :type testbed: str
1081     :type test_name: str
1082     :returns: The URL to the plot with the trending data for the given test
1083         case.
1084     :rtype str
1085     """
1086
1087     url = base
1088     file_name = ""
1089     anchor = ".html#"
1090     feature = ""
1091
1092     if "lbdpdk" in test_name or "lbvpp" in test_name:
1093         file_name = "link_bonding"
1094
1095     elif "114b" in test_name and "vhost" in test_name:
1096         file_name = "vts"
1097
1098     elif "testpmd" in test_name or "l3fwd" in test_name:
1099         file_name = "dpdk"
1100
1101     elif "memif" in test_name:
1102         file_name = "container_memif"
1103         feature = "-base"
1104
1105     elif "srv6" in test_name:
1106         file_name = "srv6"
1107
1108     elif "vhost" in test_name:
1109         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
1110             file_name = "vm_vhost_l2"
1111             if "114b" in test_name:
1112                 feature = ""
1113             elif "l2xcbase" in test_name and "x520" in test_name:
1114                 feature = "-base-l2xc"
1115             elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
1116                 feature = "-base-l2bd"
1117             else:
1118                 feature = "-base"
1119         elif "ip4base" in test_name:
1120             file_name = "vm_vhost_ip4"
1121             feature = "-base"
1122
1123     elif "ipsecbasetnlsw" in test_name:
1124         file_name = "ipsecsw"
1125         feature = "-base-scale"
1126
1127     elif "ipsec" in test_name:
1128         file_name = "ipsec"
1129         feature = "-base-scale"
1130         if "hw-" in test_name:
1131             file_name = "ipsechw"
1132         elif "sw-" in test_name:
1133             file_name = "ipsecsw"
1134         if "-int-" in test_name:
1135             feature = "-base-scale-int"
1136         elif "tnl" in test_name:
1137             feature = "-base-scale-tnl"
1138
1139     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
1140         file_name = "ip4_tunnels"
1141         feature = "-base"
1142
1143     elif "ip4base" in test_name or "ip4scale" in test_name:
1144         file_name = "ip4"
1145         if "xl710" in test_name:
1146             feature = "-base-scale-features"
1147         elif "iacl" in test_name:
1148             feature = "-features-iacl"
1149         elif "oacl" in test_name:
1150             feature = "-features-oacl"
1151         elif "snat" in test_name or "cop" in test_name:
1152             feature = "-features"
1153         else:
1154             feature = "-base-scale"
1155
1156     elif "ip6base" in test_name or "ip6scale" in test_name:
1157         file_name = "ip6"
1158         feature = "-base-scale"
1159
1160     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
1161             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
1162             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
1163         file_name = "l2"
1164         if "macip" in test_name:
1165             feature = "-features-macip"
1166         elif "iacl" in test_name:
1167             feature = "-features-iacl"
1168         elif "oacl" in test_name:
1169             feature = "-features-oacl"
1170         else:
1171             feature = "-base-scale"
1172
1173     if "x520" in test_name:
1174         nic = "x520-"
1175     elif "x710" in test_name:
1176         nic = "x710-"
1177     elif "xl710" in test_name:
1178         nic = "xl710-"
1179     elif "xxv710" in test_name:
1180         nic = "xxv710-"
1181     elif "vic1227" in test_name:
1182         nic = "vic1227-"
1183     elif "vic1385" in test_name:
1184         nic = "vic1385-"
1185     elif "x553" in test_name:
1186         nic = "x553-"
1187     else:
1188         nic = ""
1189     anchor += nic
1190
1191     if "64b" in test_name:
1192         framesize = "64b"
1193     elif "78b" in test_name:
1194         framesize = "78b"
1195     elif "imix" in test_name:
1196         framesize = "imix"
1197     elif "9000b" in test_name:
1198         framesize = "9000b"
1199     elif "1518b" in test_name:
1200         framesize = "1518b"
1201     elif "114b" in test_name:
1202         framesize = "114b"
1203     else:
1204         framesize = ""
1205     anchor += framesize + '-'
1206
1207     if "1t1c" in test_name:
1208         anchor += "1t1c"
1209     elif "2t2c" in test_name:
1210         anchor += "2t2c"
1211     elif "4t4c" in test_name:
1212         anchor += "4t4c"
1213     elif "2t1c" in test_name:
1214         anchor += "2t1c"
1215     elif "4t2c" in test_name:
1216         anchor += "4t2c"
1217     elif "8t4c" in test_name:
1218         anchor += "8t4c"
1219
1220     return url + file_name + '-' + testbed + '-' + nic + framesize + \
1221         feature.replace("-int", "").replace("-tnl", "") + anchor + feature
1222
1223
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the dashboard CSV and renders it as an HTML table where each test
    name links to its trending graph.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    hdr_row = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for col, title in enumerate(csv_lst[0]):
        cell = ET.SubElement(
            hdr_row, "th",
            attrib=dict(align="left" if col == 0 else "center"))
        cell.text = title

    # Rows (alternating light/dark shade per anomaly class):
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 hold the regression / progression counts.
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        else:
            color = "normal"
        tr = ET.SubElement(
            dashboard, "tr",
            attrib=dict(bgcolor=colors[color][r_idx % 2]))

        # Columns:
        for c_idx, item in enumerate(row):
            td = ET.SubElement(
                tr, "td",
                attrib=dict(align="left" if c_idx == 0 else "center"))
            if c_idx == 0:
                # The test name links to its trending plot.
                ref = ET.SubElement(td, "a", attrib=dict(
                    href=_generate_url("../trending/", testbed, item)))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
1300
1301
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    Writes, per build, the build number, the tested version and the names of
    all failed tests (prefixed with their NIC).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warn("    No data for the {0} '{1}'.".
                     format(table.get("type", ""), table.get("title", "")))
        return

    tbl_list = list()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get("version", "")
            except KeyError:
                logging.error("Data for {job}: {build} is not present.".
                              format(job=job, build=build))
                return
            tbl_list.append(build)
            tbl_list.append(version)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_data["status"] != "FAIL":
                    continue
                groups = re.search(REGEX_NIC, tst_data["parent"])
                if not groups:
                    # NIC not recognizable from the parent suite name.
                    continue
                tbl_list.append(
                    "{0}-{1}".format(groups.group(0), tst_data["name"]))

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.writelines(line + '\n' for line in tbl_list)
1351
1352
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Produce a CSV (and pretty txt) table of tests which failed within the
    configured time window, ordered by number of failures (descending) and,
    within the same failure count, by the time of the last failure
    (most recent first).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Guard against a failed / empty data set, consistent with
    # table_last_failed_tests; without this, iterating the data below
    # raises an exception which generate_tables does not catch.
    if data is None or data.empty:
        logging.warning("    No data for the {0} '{1}'.".
                        format(table.get("type", ""), table.get("title", "")))
        return

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the last "window" days (default 7)
    # are taken into account.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get("window", 7)))

    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Tests without a recognizable NIC name in the parent
                    # are skipped.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    generated = input_data.metadata(job, build).\
                        get("generated", "")
                    if not generated:
                        continue
                    then = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name]["data"][build] = (
                            tst_data["status"],
                            generated,
                            input_data.metadata(job, build).get("version", ""),
                            build)
                except (TypeError, KeyError) as err:
                    logging.warning("tst_name: {} - err: {}".
                                    format(tst_name, repr(err)))

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        # Remember the data of the last (in build order) failure.
        for val in tst_data["data"].values():
            if val[0] == "FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    # Stable sort by last-failure time (newest first), then bucket by the
    # number of failures (descending) - the result is ordered by failure
    # count, ties broken by recency.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1450
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Read the CSV produced by table_failed_tests (table["input-file"]) and
    render it as an HTML table embedded in a reStructuredText
    ".. raw:: html" block written to table["output-file"].  The first
    column of each row is turned into a link to the trending page for the
    configured testbed.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # The testbed is mandatory - it is needed to build the trending URLs.
    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            # Materialize the reader while the file is still open.
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header: first CSV row, left-aligned name column, centered rest.
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows: alternate the two background colors for readability.
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name:
            if c_idx == 0:
                # _generate_url is defined elsewhere in this file;
                # presumably it maps (prefix, testbed, test name) to the
                # trending page anchor - verify against its definition.
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return