resources/tools/presentation/generator_tables.py (csit.git)
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
28
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30     convert_csv_to_pretty_txt, relative_change_stdev
31
32
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
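# Illustrative note: REGEX_NIC is intended to pick the NIC token out of a
# suite or test name; for example, re.search(REGEX_NIC, "10ge2p1x520-ethip4")
# matches "10ge2p1x520". The sample string is hypothetical.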
34
35
36 def generate_tables(spec, data):
37     """Generate all tables specified in the specification file.
38
39     :param spec: Specification read from the specification file.
40     :param data: Data to process.
41     :type spec: Specification
42     :type data: InputData
43     """
44
45     logging.info("Generating the tables ...")
46     for table in spec.tables:
47         try:
48             eval(table["algorithm"])(table, data)
49         except NameError as err:
50             logging.error("Algorithm '{alg}' is probably not defined: {err}".
51                           format(alg=table["algorithm"], err=repr(err)))
52     logging.info("Done.")
53
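# A minimal sketch of one "tables" entry in the specification file, assuming
# YAML syntax; only keys read by the algorithms below are shown and all
# values are hypothetical:
#
#   - type: "table"
#     title: "Detailed Test Results"
#     algorithm: "table_details"
#     output-file: "example-table"
#     output-file-ext: ".csv"
#     columns:
#       - title: "Name"
#         data: "data name"
#       - title: "Documentation"
#         data: "data doc"
#     data:
#       "example-job-name": [1, 2]
#
# generate_tables() then dispatches to the function named in "algorithm".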
54
55 def table_details(table, input_data):
56     """Generate the table(s) with algorithm: table_details
57     specified in the specification file.
58
59     :param table: Table to generate.
60     :param input_data: Data to process.
61     :type table: pandas.Series
62     :type input_data: InputData
63     """
64
65     logging.info("  Generating the table {0} ...".
66                  format(table.get("title", "")))
67
68     # Transform the data
69     logging.info("    Creating the data set for the {0} '{1}'.".
70                  format(table.get("type", ""), table.get("title", "")))
71     data = input_data.filter_data(table)
72
73     # Prepare the header of the tables
74     header = list()
75     for column in table["columns"]:
76         header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
77
78     # Generate the data for the table according to the model in the table
79     # specification
80     job = table["data"].keys()[0]
81     build = str(table["data"][job][0])
82     try:
83         suites = input_data.suites(job, build)
84     except KeyError:
85         logging.error("    No data available. The table will not be generated.")
86         return
87
88     for suite_longname, suite in suites.iteritems():
89         # Generate data
90         suite_name = suite["name"]
91         table_lst = list()
92         for test in data[job][build].keys():
93             if data[job][build][test]["parent"] in suite_name:
94                 row_lst = list()
95                 for column in table["columns"]:
96                     try:
97                         col_data = str(data[job][build][test][column["data"].
98                                        split(" ")[1]]).replace('"', '""')
99                         if column["data"].split(" ")[1] in ("conf-history",
100                                                             "show-run"):
101                             col_data = replace(col_data, " |br| ", "",
102                                                maxreplace=1)
103                             col_data = " |prein| {0} |preout| ".\
104                                 format(col_data[:-5])
105                         row_lst.append('"{0}"'.format(col_data))
106                     except KeyError:
107                         row_lst.append("No data")
108                 table_lst.append(row_lst)
109
110         # Write the data to file
111         if table_lst:
112             file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
113                                             table["output-file-ext"])
114             logging.info("      Writing file: '{}'".format(file_name))
115             with open(file_name, "w") as file_handler:
116                 file_handler.write(",".join(header) + "\n")
117                 for item in table_lst:
118                     file_handler.write(",".join(item) + "\n")
119
120     logging.info("  Done.")
121
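# Note on the column specification used above: column["data"] is expected to
# contain two space-separated tokens and only the second one (the field name,
# e.g. "name", "doc", "conf-history" or "show-run") is used to look up the
# value in the test data; "data name" is an assumed example of the full
# string.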
122
123 def table_merged_details(table, input_data):
124     """Generate the table(s) with algorithm: table_merged_details
125     specified in the specification file.
126
127     :param table: Table to generate.
128     :param input_data: Data to process.
129     :type table: pandas.Series
130     :type input_data: InputData
131     """
132
133     logging.info("  Generating the table {0} ...".
134                  format(table.get("title", "")))
135
136     # Transform the data
137     logging.info("    Creating the data set for the {0} '{1}'.".
138                  format(table.get("type", ""), table.get("title", "")))
139     data = input_data.filter_data(table)
140     data = input_data.merge_data(data)
141     data.sort_index(inplace=True)
142
143     logging.info("    Creating the data set for the {0} '{1}'.".
144                  format(table.get("type", ""), table.get("title", "")))
145     suites = input_data.filter_data(table, data_set="suites")
146     suites = input_data.merge_data(suites)
147
148     # Prepare the header of the tables
149     header = list()
150     for column in table["columns"]:
151         header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
152
153     for _, suite in suites.iteritems():
154         # Generate data
155         suite_name = suite["name"]
156         table_lst = list()
157         for test in data.keys():
158             if data[test]["parent"] in suite_name:
159                 row_lst = list()
160                 for column in table["columns"]:
161                     try:
162                         col_data = str(data[test][column["data"].
163                                        split(" ")[1]]).replace('"', '""')
164                         col_data = replace(col_data, "No Data",
165                                            "Not Captured     ")
166                         if column["data"].split(" ")[1] in ("conf-history",
167                                                             "show-run"):
168                             col_data = replace(col_data, " |br| ", "",
169                                                maxreplace=1)
170                             col_data = " |prein| {0} |preout| ".\
171                                 format(col_data[:-5])
172                         row_lst.append('"{0}"'.format(col_data))
173                     except KeyError:
174                         row_lst.append('"Not captured"')
175                 table_lst.append(row_lst)
176
177         # Write the data to file
178         if table_lst:
179             file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
180                                             table["output-file-ext"])
181             logging.info("      Writing file: '{}'".format(file_name))
182             with open(file_name, "w") as file_handler:
183                 file_handler.write(",".join(header) + "\n")
184                 for item in table_lst:
185                     file_handler.write(",".join(item) + "\n")
186
187     logging.info("  Done.")
188
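# Formatting note for the details tables above: multi-line fields such as
# "conf-history" and "show-run" are wrapped in " |prein| ... |preout| "
# markers for the rST builder, the first " |br| " is dropped and the slice
# col_data[:-5] presumably trims a trailing " |br|" marker; the marker names
# come from the code, the trailing-marker interpretation is an assumption.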
189
190 def table_performance_comparison(table, input_data):
191     """Generate the table(s) with algorithm: table_performance_comparison
192     specified in the specification file.
193
194     :param table: Table to generate.
195     :param input_data: Data to process.
196     :type table: pandas.Series
197     :type input_data: InputData
198     """
199
200     logging.info("  Generating the table {0} ...".
201                  format(table.get("title", "")))
202
203     # Transform the data
204     logging.info("    Creating the data set for the {0} '{1}'.".
205                  format(table.get("type", ""), table.get("title", "")))
206     data = input_data.filter_data(table, continue_on_error=True)
207
208     # Prepare the header of the tables
209     try:
210         header = ["Test case", ]
211
212         if table["include-tests"] == "MRR":
213             hdr_param = "Rec Rate"
214         else:
215             hdr_param = "Thput"
216
217         history = table.get("history", None)
218         if history:
219             for item in history:
220                 header.extend(
221                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
222                      "{0} Stdev [Mpps]".format(item["title"])])
223         header.extend(
224             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
225              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
226              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
227              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
228              "Delta [%]"])
229         header_str = ",".join(header) + "\n"
230     except (AttributeError, KeyError) as err:
231         logging.error("The model is invalid, missing parameter: {0}".
232                       format(err))
233         return
234
235     # Prepare data for the table:
236     tbl_dict = dict()
237     for job, builds in table["reference"]["data"].items():
238         topo = "2n-skx" if "2n-skx" in job else ""
239         for build in builds:
240             for tst_name, tst_data in data[job][str(build)].iteritems():
241                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
242                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
243                     replace("-ndrdisc", "").replace("-pdr", "").\
244                     replace("-ndr", "").\
245                     replace("1t1c", "1c").replace("2t1c", "1c").\
246                     replace("2t2c", "2c").replace("4t2c", "2c").\
247                     replace("4t4c", "4c").replace("8t4c", "4c")
248                 if "across topologies" in table["title"].lower():
249                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
250                 if tbl_dict.get(tst_name_mod, None) is None:
251                     groups = re.search(REGEX_NIC, tst_data["parent"])
252                     nic = groups.group(0) if groups else ""
253                     name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
254                                                           split("-")[:-1]))
255                     if "across testbeds" in table["title"].lower() or \
256                             "across topologies" in table["title"].lower():
257                         name = name.\
258                             replace("1t1c", "1c").replace("2t1c", "1c").\
259                             replace("2t2c", "2c").replace("4t2c", "2c").\
260                             replace("4t4c", "4c").replace("8t4c", "4c")
261                     tbl_dict[tst_name_mod] = {"name": name,
262                                               "ref-data": list(),
263                                               "cmp-data": list()}
264                 try:
265                     # TODO: Re-work when NDRPDRDISC tests are not used
266                     if table["include-tests"] == "MRR":
267                         tbl_dict[tst_name_mod]["ref-data"]. \
268                             append(tst_data["result"]["receive-rate"].avg)
269                     elif table["include-tests"] == "PDR":
270                         if tst_data["type"] == "PDR":
271                             tbl_dict[tst_name_mod]["ref-data"]. \
272                                 append(tst_data["throughput"]["value"])
273                         elif tst_data["type"] == "NDRPDR":
274                             tbl_dict[tst_name_mod]["ref-data"].append(
275                                 tst_data["throughput"]["PDR"]["LOWER"])
276                     elif table["include-tests"] == "NDR":
277                         if tst_data["type"] == "NDR":
278                             tbl_dict[tst_name_mod]["ref-data"]. \
279                                 append(tst_data["throughput"]["value"])
280                         elif tst_data["type"] == "NDRPDR":
281                             tbl_dict[tst_name_mod]["ref-data"].append(
282                                 tst_data["throughput"]["NDR"]["LOWER"])
283                     else:
284                         continue
285                 except TypeError:
286                     pass  # No data in output.xml for this test
287
288     for job, builds in table["compare"]["data"].items():
289         for build in builds:
290             for tst_name, tst_data in data[job][str(build)].iteritems():
291                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
292                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
293                     replace("-ndrdisc", "").replace("-pdr", ""). \
294                     replace("-ndr", "").\
295                     replace("1t1c", "1c").replace("2t1c", "1c").\
296                     replace("2t2c", "2c").replace("4t2c", "2c").\
297                     replace("4t4c", "4c").replace("8t4c", "4c")
298                 if "across topologies" in table["title"].lower():
299                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
300                 if tbl_dict.get(tst_name_mod, None) is None:
301                     groups = re.search(REGEX_NIC, tst_data["parent"])
302                     nic = groups.group(0) if groups else ""
303                     name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
304                                                           split("-")[:-1]))
305                     if "across testbeds" in table["title"].lower() or \
306                             "across topologies" in table["title"].lower():
307                         name = name.\
308                             replace("1t1c", "1c").replace("2t1c", "1c").\
309                             replace("2t2c", "2c").replace("4t2c", "2c").\
310                             replace("4t4c", "4c").replace("8t4c", "4c")
311                     tbl_dict[tst_name_mod] = {"name": name,
312                                               "ref-data": list(),
313                                               "cmp-data": list()}
314                 try:
315                     # TODO: Re-work when NDRPDRDISC tests are not used
316                     if table["include-tests"] == "MRR":
317                         tbl_dict[tst_name_mod]["cmp-data"]. \
318                             append(tst_data["result"]["receive-rate"].avg)
319                     elif table["include-tests"] == "PDR":
320                         if tst_data["type"] == "PDR":
321                             tbl_dict[tst_name_mod]["cmp-data"]. \
322                                 append(tst_data["throughput"]["value"])
323                         elif tst_data["type"] == "NDRPDR":
324                             tbl_dict[tst_name_mod]["cmp-data"].append(
325                                 tst_data["throughput"]["PDR"]["LOWER"])
326                     elif table["include-tests"] == "NDR":
327                         if tst_data["type"] == "NDR":
328                             tbl_dict[tst_name_mod]["cmp-data"]. \
329                                 append(tst_data["throughput"]["value"])
330                         elif tst_data["type"] == "NDRPDR":
331                             tbl_dict[tst_name_mod]["cmp-data"].append(
332                                 tst_data["throughput"]["NDR"]["LOWER"])
333                     else:
334                         continue
335                 except (KeyError, TypeError):
336                     pass
337     if history:
338         for item in history:
339             for job, builds in item["data"].items():
340                 for build in builds:
341                     for tst_name, tst_data in data[job][str(build)].iteritems():
342                         tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
343                             replace("-ndrpdr", "").replace("-pdrdisc", ""). \
344                             replace("-ndrdisc", "").replace("-pdr", ""). \
345                             replace("-ndr", "").\
346                             replace("1t1c", "1c").replace("2t1c", "1c").\
347                             replace("2t2c", "2c").replace("4t2c", "2c").\
348                             replace("4t4c", "4c").replace("8t4c", "4c")
349                         if "across topologies" in table["title"].lower():
350                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
351                         if tbl_dict.get(tst_name_mod, None) is None:
352                             continue
353                         if tbl_dict[tst_name_mod].get("history", None) is None:
354                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
355                         if tbl_dict[tst_name_mod]["history"].get(item["title"],
356                                                              None) is None:
357                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
358                                 list()
359                         try:
360                             # TODO: Re-work when NDRPDRDISC tests are not used
361                             if table["include-tests"] == "MRR":
362                                 tbl_dict[tst_name_mod]["history"][item["title"
363                                 ]].append(tst_data["result"]["receive-rate"].
364                                           avg)
365                             elif table["include-tests"] == "PDR":
366                                 if tst_data["type"] == "PDR":
367                                     tbl_dict[tst_name_mod]["history"][
368                                         item["title"]].\
369                                         append(tst_data["throughput"]["value"])
370                                 elif tst_data["type"] == "NDRPDR":
371                                     tbl_dict[tst_name_mod]["history"][item[
372                                         "title"]].append(tst_data["throughput"][
373                                         "PDR"]["LOWER"])
374                             elif table["include-tests"] == "NDR":
375                                 if tst_data["type"] == "NDR":
376                                     tbl_dict[tst_name_mod]["history"][
377                                         item["title"]].\
378                                         append(tst_data["throughput"]["value"])
379                                 elif tst_data["type"] == "NDRPDR":
380                                     tbl_dict[tst_name_mod]["history"][item[
381                                         "title"]].append(tst_data["throughput"][
382                                         "NDR"]["LOWER"])
383                             else:
384                                 continue
385                         except (TypeError, KeyError):
386                             pass
387
388     tbl_lst = list()
389     footnote = False
390     for tst_name in tbl_dict.keys():
391         item = [tbl_dict[tst_name]["name"], ]
392         if history:
393             if tbl_dict[tst_name].get("history", None) is not None:
394                 for hist_data in tbl_dict[tst_name]["history"].values():
395                     if hist_data:
396                         item.append(round(mean(hist_data) / 1000000, 2))
397                         item.append(round(stdev(hist_data) / 1000000, 2))
398                     else:
399                         item.extend(["Not tested", "Not tested"])
400             else:
401                 item.extend(["Not tested", "Not tested"])
402         data_t = tbl_dict[tst_name]["ref-data"]
403         if data_t:
404             item.append(round(mean(data_t) / 1000000, 2))
405             item.append(round(stdev(data_t) / 1000000, 2))
406         else:
407             item.extend(["Not tested", "Not tested"])
408         data_t = tbl_dict[tst_name]["cmp-data"]
409         if data_t:
410             item.append(round(mean(data_t) / 1000000, 2))
411             item.append(round(stdev(data_t) / 1000000, 2))
412         else:
413             item.extend(["Not tested", "Not tested"])
414         if item[-2] == "Not tested":
415             pass
416         elif item[-4] == "Not tested":
417             item.append("New in CSIT-1908")
418         elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
419             item.append("See footnote [1]")
420             footnote = True
421         elif item[-4] != 0:
422             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
423         if (len(item) == len(header)) and (item[-3] != "Not tested"):
424             tbl_lst.append(item)
425
426     # Sort the table according to the relative change
427     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
428
429     # Generate csv tables:
430     csv_file = "{0}.csv".format(table["output-file"])
431     with open(csv_file, "w") as file_handler:
432         file_handler.write(header_str)
433         for test in tbl_lst:
434             file_handler.write(",".join([str(item) for item in test]) + "\n")
435
436     txt_file_name = "{0}.txt".format(table["output-file"])
437     convert_csv_to_pretty_txt(csv_file, txt_file_name)
438
439     if footnote:
440         with open(txt_file_name, 'a') as txt_file:
441             txt_file.writelines([
442                 "\nFootnotes:\n",
443                 "[1] CSIT-1908 changed test methodology of dot1q tests in "
444                 "2n-skx testbeds, dot1q encapsulation is now used on both "
445                 "links of SUT.\n",
446                 "    Previously dot1q was used only on a single link with the "
447                 "other link carrying untagged Ethernet frames. This change "
448                 "results\n",
449                 "    in slightly lower throughput in CSIT-1908 for these "
450                 "tests. See release notes."
451             ])
452
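# The "Delta [%]" column above is produced by relative_change() on the two
# mean throughputs. A hedged numeric sketch, assuming relative_change()
# returns (new - old) / old * 100:
#
#   ref mean = 10.0 Mpps, cmp mean = 11.0 Mpps  ->  delta = +10
#   ref mean = 10.0 Mpps, cmp mean =  9.5 Mpps  ->  delta = -5
#
# The value is truncated to an int before it is written to the row.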
453
454 def table_performance_comparison_nic(table, input_data):
455     """Generate the table(s) with algorithm: table_performance_comparison_nic
456     specified in the specification file.
457
458     :param table: Table to generate.
459     :param input_data: Data to process.
460     :type table: pandas.Series
461     :type input_data: InputData
462     """
463
464     logging.info("  Generating the table {0} ...".
465                  format(table.get("title", "")))
466
467     # Transform the data
468     logging.info("    Creating the data set for the {0} '{1}'.".
469                  format(table.get("type", ""), table.get("title", "")))
470     data = input_data.filter_data(table, continue_on_error=True)
471
472     # Prepare the header of the tables
473     try:
474         header = ["Test case", ]
475
476         if table["include-tests"] == "MRR":
477             hdr_param = "Rec Rate"
478         else:
479             hdr_param = "Thput"
480
481         history = table.get("history", None)
482         if history:
483             for item in history:
484                 header.extend(
485                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
486                      "{0} Stdev [Mpps]".format(item["title"])])
487         header.extend(
488             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
489              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
490              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
491              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
492              "Delta [%]"])
493         header_str = ",".join(header) + "\n"
494     except (AttributeError, KeyError) as err:
495         logging.error("The model is invalid, missing parameter: {0}".
496                       format(err))
497         return
498
499     # Prepare data for the table:
500     tbl_dict = dict()
501     for job, builds in table["reference"]["data"].items():
502         topo = "2n-skx" if "2n-skx" in job else ""
503         for build in builds:
504             for tst_name, tst_data in data[job][str(build)].iteritems():
505                 if table["reference"]["nic"] not in tst_data["tags"]:
506                     continue
507                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
508                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
509                     replace("-ndrdisc", "").replace("-pdr", "").\
510                     replace("-ndr", "").\
511                     replace("1t1c", "1c").replace("2t1c", "1c").\
512                     replace("2t2c", "2c").replace("4t2c", "2c").\
513                     replace("4t4c", "4c").replace("8t4c", "4c")
514                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
515                 if "across topologies" in table["title"].lower():
516                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
517                 if tbl_dict.get(tst_name_mod, None) is None:
518                     name = "{0}".format("-".join(tst_data["name"].
519                                                  split("-")[:-1]))
520                     if "across testbeds" in table["title"].lower() or \
521                             "across topologies" in table["title"].lower():
522                         name = name.\
523                             replace("1t1c", "1c").replace("2t1c", "1c").\
524                             replace("2t2c", "2c").replace("4t2c", "2c").\
525                             replace("4t4c", "4c").replace("8t4c", "4c")
526                     tbl_dict[tst_name_mod] = {"name": name,
527                                               "ref-data": list(),
528                                               "cmp-data": list()}
529                 try:
530                     # TODO: Re-work when NDRPDRDISC tests are not used
531                     if table["include-tests"] == "MRR":
532                         tbl_dict[tst_name_mod]["ref-data"]. \
533                             append(tst_data["result"]["receive-rate"].avg)
534                     elif table["include-tests"] == "PDR":
535                         if tst_data["type"] == "PDR":
536                             tbl_dict[tst_name_mod]["ref-data"]. \
537                                 append(tst_data["throughput"]["value"])
538                         elif tst_data["type"] == "NDRPDR":
539                             tbl_dict[tst_name_mod]["ref-data"].append(
540                                 tst_data["throughput"]["PDR"]["LOWER"])
541                     elif table["include-tests"] == "NDR":
542                         if tst_data["type"] == "NDR":
543                             tbl_dict[tst_name_mod]["ref-data"]. \
544                                 append(tst_data["throughput"]["value"])
545                         elif tst_data["type"] == "NDRPDR":
546                             tbl_dict[tst_name_mod]["ref-data"].append(
547                                 tst_data["throughput"]["NDR"]["LOWER"])
548                     else:
549                         continue
550                 except TypeError:
551                     pass  # No data in output.xml for this test
552
553     for job, builds in table["compare"]["data"].items():
554         for build in builds:
555             for tst_name, tst_data in data[job][str(build)].iteritems():
556                 if table["compare"]["nic"] not in tst_data["tags"]:
557                     continue
558                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
559                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
560                     replace("-ndrdisc", "").replace("-pdr", ""). \
561                     replace("-ndr", "").\
562                     replace("1t1c", "1c").replace("2t1c", "1c").\
563                     replace("2t2c", "2c").replace("4t2c", "2c").\
564                     replace("4t4c", "4c").replace("8t4c", "4c")
565                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
566                 if "across topologies" in table["title"].lower():
567                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
568                 if tbl_dict.get(tst_name_mod, None) is None:
569                     name = "{0}".format("-".join(tst_data["name"].
570                                                  split("-")[:-1]))
571                     if "across testbeds" in table["title"].lower() or \
572                             "across topologies" in table["title"].lower():
573                         name = name.\
574                             replace("1t1c", "1c").replace("2t1c", "1c").\
575                             replace("2t2c", "2c").replace("4t2c", "2c").\
576                             replace("4t4c", "4c").replace("8t4c", "4c")
577                     tbl_dict[tst_name_mod] = {"name": name,
578                                               "ref-data": list(),
579                                               "cmp-data": list()}
580                 try:
581                     # TODO: Re-work when NDRPDRDISC tests are not used
582                     if table["include-tests"] == "MRR":
583                         tbl_dict[tst_name_mod]["cmp-data"]. \
584                             append(tst_data["result"]["receive-rate"].avg)
585                     elif table["include-tests"] == "PDR":
586                         if tst_data["type"] == "PDR":
587                             tbl_dict[tst_name_mod]["cmp-data"]. \
588                                 append(tst_data["throughput"]["value"])
589                         elif tst_data["type"] == "NDRPDR":
590                             tbl_dict[tst_name_mod]["cmp-data"].append(
591                                 tst_data["throughput"]["PDR"]["LOWER"])
592                     elif table["include-tests"] == "NDR":
593                         if tst_data["type"] == "NDR":
594                             tbl_dict[tst_name_mod]["cmp-data"]. \
595                                 append(tst_data["throughput"]["value"])
596                         elif tst_data["type"] == "NDRPDR":
597                             tbl_dict[tst_name_mod]["cmp-data"].append(
598                                 tst_data["throughput"]["NDR"]["LOWER"])
599                     else:
600                         continue
601                 except (KeyError, TypeError):
602                     pass
603
604     if history:
605         for item in history:
606             for job, builds in item["data"].items():
607                 for build in builds:
608                     for tst_name, tst_data in data[job][str(build)].iteritems():
609                         if item["nic"] not in tst_data["tags"]:
610                             continue
611                         tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
612                             replace("-ndrpdr", "").replace("-pdrdisc", ""). \
613                             replace("-ndrdisc", "").replace("-pdr", ""). \
614                             replace("-ndr", "").\
615                             replace("1t1c", "1c").replace("2t1c", "1c").\
616                             replace("2t2c", "2c").replace("4t2c", "2c").\
617                             replace("4t4c", "4c").replace("8t4c", "4c")
618                         tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
619                         if "across topologies" in table["title"].lower():
620                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
621                         if tbl_dict.get(tst_name_mod, None) is None:
622                             continue
623                         if tbl_dict[tst_name_mod].get("history", None) is None:
624                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
625                         if tbl_dict[tst_name_mod]["history"].get(item["title"],
626                                                              None) is None:
627                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
628                                 list()
629                         try:
630                             # TODO: Re-work when NDRPDRDISC tests are not used
631                             if table["include-tests"] == "MRR":
632                                 tbl_dict[tst_name_mod]["history"][item["title"
633                                 ]].append(tst_data["result"]["receive-rate"].
634                                           avg)
635                             elif table["include-tests"] == "PDR":
636                                 if tst_data["type"] == "PDR":
637                                     tbl_dict[tst_name_mod]["history"][
638                                         item["title"]].\
639                                         append(tst_data["throughput"]["value"])
640                                 elif tst_data["type"] == "NDRPDR":
641                                     tbl_dict[tst_name_mod]["history"][item[
642                                         "title"]].append(tst_data["throughput"][
643                                         "PDR"]["LOWER"])
644                             elif table["include-tests"] == "NDR":
645                                 if tst_data["type"] == "NDR":
646                                     tbl_dict[tst_name_mod]["history"][
647                                         item["title"]].\
648                                         append(tst_data["throughput"]["value"])
649                                 elif tst_data["type"] == "NDRPDR":
650                                     tbl_dict[tst_name_mod]["history"][item[
651                                         "title"]].append(tst_data["throughput"][
652                                         "NDR"]["LOWER"])
653                             else:
654                                 continue
655                         except (TypeError, KeyError):
656                             pass
657
658     tbl_lst = list()
659     footnote = False
660     for tst_name in tbl_dict.keys():
661         item = [tbl_dict[tst_name]["name"], ]
662         if history:
663             if tbl_dict[tst_name].get("history", None) is not None:
664                 for hist_data in tbl_dict[tst_name]["history"].values():
665                     if hist_data:
666                         item.append(round(mean(hist_data) / 1000000, 2))
667                         item.append(round(stdev(hist_data) / 1000000, 2))
668                     else:
669                         item.extend(["Not tested", "Not tested"])
670             else:
671                 item.extend(["Not tested", "Not tested"])
672         data_t = tbl_dict[tst_name]["ref-data"]
673         if data_t:
674             item.append(round(mean(data_t) / 1000000, 2))
675             item.append(round(stdev(data_t) / 1000000, 2))
676         else:
677             item.extend(["Not tested", "Not tested"])
678         data_t = tbl_dict[tst_name]["cmp-data"]
679         if data_t:
680             item.append(round(mean(data_t) / 1000000, 2))
681             item.append(round(stdev(data_t) / 1000000, 2))
682         else:
683             item.extend(["Not tested", "Not tested"])
684         if item[-2] == "Not tested":
685             pass
686         elif item[-4] == "Not tested":
687             item.append("New in CSIT-1908")
688         elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
689             item.append("See footnote [1]")
690             footnote = True
691         elif item[-4] != 0:
692             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
693         if (len(item) == len(header)) and (item[-3] != "Not tested"):
694             tbl_lst.append(item)
695
696     # Sort the table according to the relative change
697     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
698
699     # Generate csv tables:
700     csv_file = "{0}.csv".format(table["output-file"])
701     with open(csv_file, "w") as file_handler:
702         file_handler.write(header_str)
703         for test in tbl_lst:
704             file_handler.write(",".join([str(item) for item in test]) + "\n")
705
706     txt_file_name = "{0}.txt".format(table["output-file"])
707     convert_csv_to_pretty_txt(csv_file, txt_file_name)
708
709     if footnote:
710         with open(txt_file_name, 'a') as txt_file:
711             txt_file.writelines([
712                 "\nFootnotes:\n",
713                 "[1] CSIT-1908 changed test methodology of dot1q tests in "
714                 "2n-skx testbeds, dot1q encapsulation is now used on both "
715                 "links of SUT.\n",
716                 "    Previously dot1q was used only on a single link with the "
717                 "other link carrying untagged Ethernet frames. This change "
718                 "results\n",
719                 "    in slightly lower throughput in CSIT-1908 for these "
720                 "tests. See release notes."
721             ])
722
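# Unlike table_performance_comparison(), this *_nic variant keeps only tests
# carrying the NIC tag named in the spec and strips any NIC token from the
# row key (re.sub(REGEX_NIC, ...)), so the key stays NIC-independent even if
# a NIC string such as the hypothetical "10ge2p1x520" appears in the test
# name, and reference and compare results still land in the same row.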
723
724 def table_nics_comparison(table, input_data):
725     """Generate the table(s) with algorithm: table_nics_comparison
726     specified in the specification file.
727
728     :param table: Table to generate.
729     :param input_data: Data to process.
730     :type table: pandas.Series
731     :type input_data: InputData
732     """
733
734     logging.info("  Generating the table {0} ...".
735                  format(table.get("title", "")))
736
737     # Transform the data
738     logging.info("    Creating the data set for the {0} '{1}'.".
739                  format(table.get("type", ""), table.get("title", "")))
740     data = input_data.filter_data(table, continue_on_error=True)
741
742     # Prepare the header of the tables
743     try:
744         header = ["Test case", ]
745
746         if table["include-tests"] == "MRR":
747             hdr_param = "Rec Rate"
748         else:
749             hdr_param = "Thput"
750
751         header.extend(
752             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
753              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
754              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
755              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
756              "Delta [%]"])
757         header_str = ",".join(header) + "\n"
758     except (AttributeError, KeyError) as err:
759         logging.error("The model is invalid, missing parameter: {0}".
760                       format(err))
761         return
762
763     # Prepare data for the table:
764     tbl_dict = dict()
765     for job, builds in table["data"].items():
766         for build in builds:
767             for tst_name, tst_data in data[job][str(build)].iteritems():
768                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
769                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
770                     replace("-ndrdisc", "").replace("-pdr", "").\
771                     replace("-ndr", "").\
772                     replace("1t1c", "1c").replace("2t1c", "1c").\
773                     replace("2t2c", "2c").replace("4t2c", "2c").\
774                     replace("4t4c", "4c").replace("8t4c", "4c")
775                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
776                 if tbl_dict.get(tst_name_mod, None) is None:
777                     name = "-".join(tst_data["name"].split("-")[:-1])
778                     tbl_dict[tst_name_mod] = {"name": name,
779                                               "ref-data": list(),
780                                               "cmp-data": list()}
781                 try:
782                     if table["include-tests"] == "MRR":
783                         result = tst_data["result"]["receive-rate"].avg
784                     elif table["include-tests"] == "PDR":
785                         result = tst_data["throughput"]["PDR"]["LOWER"]
786                     elif table["include-tests"] == "NDR":
787                         result = tst_data["throughput"]["NDR"]["LOWER"]
788                     else:
789                         result = None
790
791                     if result:
792                         if table["reference"]["nic"] in tst_data["tags"]:
793                             tbl_dict[tst_name_mod]["ref-data"].append(result)
794                         elif table["compare"]["nic"] in tst_data["tags"]:
795                             tbl_dict[tst_name_mod]["cmp-data"].append(result)
796                 except (TypeError, KeyError) as err:
797                     logging.debug("No data for {0}".format(tst_name))
798                     logging.debug(repr(err))
799                     # No data in output.xml for this test
800
801     tbl_lst = list()
802     for tst_name in tbl_dict.keys():
803         item = [tbl_dict[tst_name]["name"], ]
804         data_t = tbl_dict[tst_name]["ref-data"]
805         if data_t:
806             item.append(round(mean(data_t) / 1000000, 2))
807             item.append(round(stdev(data_t) / 1000000, 2))
808         else:
809             item.extend([None, None])
810         data_t = tbl_dict[tst_name]["cmp-data"]
811         if data_t:
812             item.append(round(mean(data_t) / 1000000, 2))
813             item.append(round(stdev(data_t) / 1000000, 2))
814         else:
815             item.extend([None, None])
816         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
817             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
818         if len(item) == len(header):
819             tbl_lst.append(item)
820
821     # Sort the table according to the relative change
822     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
823
824     # Generate csv tables:
825     csv_file = "{0}.csv".format(table["output-file"])
826     with open(csv_file, "w") as file_handler:
827         file_handler.write(header_str)
828         for test in tbl_lst:
829             file_handler.write(",".join([str(item) for item in test]) + "\n")
830
831     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
832
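# Rows in the NIC comparison above keep None where one of the NICs has no
# result; such rows end up shorter than the header (no delta is appended)
# and are silently dropped by the len(item) == len(header) check.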
833
834 def table_soak_vs_ndr(table, input_data):
835     """Generate the table(s) with algorithm: table_soak_vs_ndr
836     specified in the specification file.
837
838     :param table: Table to generate.
839     :param input_data: Data to process.
840     :type table: pandas.Series
841     :type input_data: InputData
842     """
843
844     logging.info("  Generating the table {0} ...".
845                  format(table.get("title", "")))
846
847     # Transform the data
848     logging.info("    Creating the data set for the {0} '{1}'.".
849                  format(table.get("type", ""), table.get("title", "")))
850     data = input_data.filter_data(table, continue_on_error=True)
851
852     # Prepare the header of the table
853     try:
854         header = [
855             "Test case",
856             "{0} Thput [Mpps]".format(table["reference"]["title"]),
857             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
858             "{0} Thput [Mpps]".format(table["compare"]["title"]),
859             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
860             "Delta [%]", "Stdev of delta [%]"]
861         header_str = ",".join(header) + "\n"
862     except (AttributeError, KeyError) as err:
863         logging.error("The model is invalid, missing parameter: {0}".
864                       format(err))
865         return
866
867     # Create a list of available SOAK test results:
868     tbl_dict = dict()
869     for job, builds in table["compare"]["data"].items():
870         for build in builds:
871             for tst_name, tst_data in data[job][str(build)].iteritems():
872                 if tst_data["type"] == "SOAK":
873                     tst_name_mod = tst_name.replace("-soak", "")
874                     if tbl_dict.get(tst_name_mod, None) is None:
875                         groups = re.search(REGEX_NIC, tst_data["parent"])
876                         nic = groups.group(0) if groups else ""
877                         name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
878                                                               split("-")[:-1]))
879                         tbl_dict[tst_name_mod] = {
880                             "name": name,
881                             "ref-data": list(),
882                             "cmp-data": list()
883                         }
884                     try:
885                         tbl_dict[tst_name_mod]["cmp-data"].append(
886                             tst_data["throughput"]["LOWER"])
887                     except (KeyError, TypeError):
888                         pass
889     tests_lst = tbl_dict.keys()
890
891     # Add corresponding NDR test results:
892     for job, builds in table["reference"]["data"].items():
893         for build in builds:
894             for tst_name, tst_data in data[job][str(build)].iteritems():
895                 tst_name_mod = tst_name.replace("-ndrpdr", "").\
896                     replace("-mrr", "")
897                 if tst_name_mod in tests_lst:
898                     try:
899                         if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
900                             if table["include-tests"] == "MRR":
901                                 result = tst_data["result"]["receive-rate"].avg
902                             elif table["include-tests"] == "PDR":
903                                 result = tst_data["throughput"]["PDR"]["LOWER"]
904                             elif table["include-tests"] == "NDR":
905                                 result = tst_data["throughput"]["NDR"]["LOWER"]
906                             else:
907                                 result = None
908                             if result is not None:
909                                 tbl_dict[tst_name_mod]["ref-data"].append(
910                                     result)
911                     except (KeyError, TypeError):
912                         continue
913
914     tbl_lst = list()
915     for tst_name in tbl_dict.keys():
916         item = [tbl_dict[tst_name]["name"], ]
917         data_r = tbl_dict[tst_name]["ref-data"]
918         if data_r:
919             data_r_mean = mean(data_r)
920             item.append(round(data_r_mean / 1000000, 2))
921             data_r_stdev = stdev(data_r)
922             item.append(round(data_r_stdev / 1000000, 2))
923         else:
924             data_r_mean = None
925             data_r_stdev = None
926             item.extend([None, None])
927         data_c = tbl_dict[tst_name]["cmp-data"]
928         if data_c:
929             data_c_mean = mean(data_c)
930             item.append(round(data_c_mean / 1000000, 2))
931             data_c_stdev = stdev(data_c)
932             item.append(round(data_c_stdev / 1000000, 2))
933         else:
934             data_c_mean = None
935             data_c_stdev = None
936             item.extend([None, None])
937         if data_r_mean and data_c_mean:
938             delta, d_stdev = relative_change_stdev(
939                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
940             item.append(round(delta, 2))
941             item.append(round(d_stdev, 2))
942             tbl_lst.append(item)
943
944     # Sort the table according to the relative change
945     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
946
947     # Generate csv tables:
948     csv_file = "{0}.csv".format(table["output-file"])
949     with open(csv_file, "w") as file_handler:
950         file_handler.write(header_str)
951         for test in tbl_lst:
952             file_handler.write(",".join([str(item) for item in test]) + "\n")
953
954     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
955
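# A hedged numeric sketch of the soak vs NDR rows built above (the exact
# propagation of the standard deviations lives in relative_change_stdev(),
# imported from utils):
#
#   NDR reference:  mean 10.00 Mpps, stdev 0.50 Mpps
#   SOAK compare:   mean  9.50 Mpps, stdev 0.40 Mpps
#   -> Delta [%] around -5.0, with its own stdev in the last column
#
# All figures are made up for illustration only.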
956
957 def table_performance_trending_dashboard(table, input_data):
958     """Generate the table(s) with algorithm:
959     table_performance_trending_dashboard
960     specified in the specification file.
961
962     :param table: Table to generate.
963     :param input_data: Data to process.
964     :type table: pandas.Series
965     :type input_data: InputData
966     """
967
968     logging.info("  Generating the table {0} ...".
969                  format(table.get("title", "")))
970
971     # Transform the data
972     logging.info("    Creating the data set for the {0} '{1}'.".
973                  format(table.get("type", ""), table.get("title", "")))
974     data = input_data.filter_data(table, continue_on_error=True)
975
976     # Prepare the header of the tables
977     header = ["Test Case",
978               "Trend [Mpps]",
979               "Short-Term Change [%]",
980               "Long-Term Change [%]",
981               "Regressions [#]",
982               "Progressions [#]"
983               ]
984     header_str = ",".join(header) + "\n"
985
986     # Prepare data for the table:
987     tbl_dict = dict()
988     for job, builds in table["data"].items():
989         for build in builds:
990             for tst_name, tst_data in data[job][str(build)].iteritems():
991                 if tst_name.lower() in table.get("ignore-list", list()):
992                     continue
993                 if tbl_dict.get(tst_name, None) is None:
994                     groups = re.search(REGEX_NIC, tst_data["parent"])
995                     if not groups:
996                         continue
997                     nic = groups.group(0)
998                     tbl_dict[tst_name] = {
999                         "name": "{0}-{1}".format(nic, tst_data["name"]),
1000                         "data": OrderedDict()}
1001                 try:
1002                     tbl_dict[tst_name]["data"][str(build)] = \
1003                         tst_data["result"]["receive-rate"]
1004                 except (TypeError, KeyError):
1005                     pass  # No data in output.xml for this test
1006
1007     tbl_lst = list()
1008     for tst_name in tbl_dict.keys():
1009         data_t = tbl_dict[tst_name]["data"]
1010         if len(data_t) < 2:
1011             continue
1012
1013         classification_lst, avgs = classify_anomalies(data_t)
1014
1015         win_size = min(len(data_t), table["window"])
1016         long_win_size = min(len(data_t), table["long-trend-window"])
1017
1018         try:
1019             max_long_avg = max(
1020                 [x for x in avgs[-long_win_size:-win_size]
1021                  if not isnan(x)])
1022         except ValueError:
1023             max_long_avg = nan
1024         last_avg = avgs[-1]
1025         avg_week_ago = avgs[max(-win_size, -len(avgs))]
1026
1027         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1028             rel_change_last = nan
1029         else:
1030             rel_change_last = round(
1031                 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1032
1033         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1034             rel_change_long = nan
1035         else:
1036             rel_change_long = round(
1037                 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1038
1039         if classification_lst:
1040             if isnan(rel_change_last) and isnan(rel_change_long):
1041                 continue
1042             if (isnan(last_avg) or
1043                 isnan(rel_change_last) or
1044                 isnan(rel_change_long)):
1045                 continue
1046             tbl_lst.append(
1047                 [tbl_dict[tst_name]["name"],
1048                  round(last_avg / 1000000, 2),
1049                  rel_change_last,
1050                  rel_change_long,
1051                  classification_lst[-win_size:].count("regression"),
1052                  classification_lst[-win_size:].count("progression")])
1053
1054     tbl_lst.sort(key=lambda rel: rel[0])
1055
1056     tbl_sorted = list()
1057     for nrr in range(table["window"], -1, -1):
1058         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1059         for nrp in range(table["window"], -1, -1):
1060             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1061             tbl_out.sort(key=lambda rel: rel[2])
1062             tbl_sorted.extend(tbl_out)
1063
1064     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1065
1066     logging.info("    Writing file: '{0}'".format(file_name))
1067     with open(file_name, "w") as file_handler:
1068         file_handler.write(header_str)
1069         for test in tbl_sorted:
1070             file_handler.write(",".join([str(item) for item in test]) + '\n')
1071
1072     txt_file_name = "{0}.txt".format(table["output-file"])
1073     logging.info("    Writing file: '{0}'".format(txt_file_name))
1074     convert_csv_to_pretty_txt(file_name, txt_file_name)
1075
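# A worked example of the two "change" columns above, using made-up averages:
# if the latest trend average is 9.50 Mpps, the average one window ago is
# 10.00 Mpps and the long-term maximum is 10.20 Mpps, then
#
#   Short-Term Change = (9.50 - 10.00) / 10.00 * 100 = -5.0 %
#   Long-Term Change  = (9.50 - 10.20) / 10.20 * 100 ~ -6.86 %
#
# which mirrors the rel_change_last / rel_change_long computation above.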
1076
1077 def _generate_url(base, testbed, test_name):
1078     """Generate URL to a trending plot from the name of the test case.
1079
1080     :param base: The base part of URL common to all test cases.
1081     :param testbed: The testbed used for testing.
1082     :param test_name: The name of the test case.
1083     :type base: str
1084     :type testbed: str
1085     :type test_name: str
1086     :returns: The URL to the plot with the trending data for the given test
1087         case.
1088     :rtype: str
1089     """
1090
1091     url = base
1092     file_name = ""
1093     anchor = ".html#"
1094     feature = ""
1095
1096     if "lbdpdk" in test_name or "lbvpp" in test_name:
1097         file_name = "link_bonding"
1098
1099     elif "114b" in test_name and "vhost" in test_name:
1100         file_name = "vts"
1101
1102     elif "testpmd" in test_name or "l3fwd" in test_name:
1103         file_name = "dpdk"
1104
1105     elif "memif" in test_name:
1106         file_name = "container_memif"
1107         feature = "-base"
1108
1109     elif "srv6" in test_name:
1110         file_name = "srv6"
1111
1112     elif "vhost" in test_name:
1113         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
1114             file_name = "vm_vhost_l2"
1115             if "114b" in test_name:
1116                 feature = ""
1117             elif "l2xcbase" in test_name and "x520" in test_name:
1118                 feature = "-base-l2xc"
1119             elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
1120                 feature = "-base-l2bd"
1121             else:
1122                 feature = "-base"
1123         elif "ip4base" in test_name:
1124             file_name = "vm_vhost_ip4"
1125             feature = "-base"
1126
1127     elif "ipsecbasetnlsw" in test_name:
1128         file_name = "ipsecsw"
1129         feature = "-base-scale"
1130
1131     elif "ipsec" in test_name:
1132         file_name = "ipsec"
1133         feature = "-base-scale"
1134         if "hw-" in test_name:
1135             file_name = "ipsechw"
1136         elif "sw-" in test_name:
1137             file_name = "ipsecsw"
1138
1139     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
1140         file_name = "ip4_tunnels"
1141         feature = "-base"
1142
1143     elif "ip4base" in test_name or "ip4scale" in test_name:
1144         file_name = "ip4"
1145         if "xl710" in test_name:
1146             feature = "-base-scale-features"
1147         elif "iacl" in test_name:
1148             feature = "-features-iacl"
1149         elif "oacl" in test_name:
1150             feature = "-features-oacl"
1151         elif "snat" in test_name or "cop" in test_name:
1152             feature = "-features"
1153         else:
1154             feature = "-base-scale"
1155
1156     elif "ip6base" in test_name or "ip6scale" in test_name:
1157         file_name = "ip6"
1158         feature = "-base-scale"
1159
1160     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
1161             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
1162             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
1163         file_name = "l2"
1164         if "macip" in test_name:
1165             feature = "-features-macip"
1166         elif "iacl" in test_name:
1167             feature = "-features-iacl"
1168         elif "oacl" in test_name:
1169             feature = "-features-oacl"
1170         else:
1171             feature = "-base-scale"
1172
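     # The anchor within the page is built from the NIC model, the frame size
     # and the thread/core combination found in the test name.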
1173     if "x520" in test_name:
1174         nic = "x520-"
1175     elif "x710" in test_name:
1176         nic = "x710-"
1177     elif "xl710" in test_name:
1178         nic = "xl710-"
1179     elif "xxv710" in test_name:
1180         nic = "xxv710-"
1181     elif "vic1227" in test_name:
1182         nic = "vic1227-"
1183     elif "vic1385" in test_name:
1184         nic = "vic1385-"
1185     else:
1186         nic = ""
1187     anchor += nic
1188
1189     if "64b" in test_name:
1190         framesize = "64b"
1191     elif "78b" in test_name:
1192         framesize = "78b"
1193     elif "imix" in test_name:
1194         framesize = "imix"
1195     elif "9000b" in test_name:
1196         framesize = "9000b"
1197     elif "1518b" in test_name:
1198         framesize = "1518b"
1199     elif "114b" in test_name:
1200         framesize = "114b"
1201     else:
1202         framesize = ""
1203     anchor += framesize + '-'
1204
1205     if "1t1c" in test_name:
1206         anchor += "1t1c"
1207     elif "2t2c" in test_name:
1208         anchor += "2t2c"
1209     elif "4t4c" in test_name:
1210         anchor += "4t4c"
1211     elif "2t1c" in test_name:
1212         anchor += "2t1c"
1213     elif "4t2c" in test_name:
1214         anchor += "4t2c"
1215     elif "8t4c" in test_name:
1216         anchor += "8t4c"
1217
1218     return url + file_name + '-' + testbed + '-' + nic + framesize + \
1219         feature.replace("-int", "").replace("-tnl", "") + anchor + feature
1220
1221
1222 def table_performance_trending_dashboard_html(table, input_data):
1223     """Generate the table(s) with algorithm:
1224     table_performance_trending_dashboard_html specified in the specification
1225     file.
1226
1227     :param table: Table to generate.
1228     :param input_data: Data to process.
1229     :type table: dict
1230     :type input_data: InputData
1231     """
1232
1233     testbed = table.get("testbed", None)
1234     if testbed is None:
1235         logging.error("The testbed is not defined for the table '{0}'.".
1236                       format(table.get("title", "")))
1237         return
1238
1239     logging.info("  Generating the table {0} ...".
1240                  format(table.get("title", "")))
1241
1242     try:
1243         with open(table["input-file"], 'rb') as csv_file:
1244             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1245             csv_lst = [item for item in csv_content]
1246     except KeyError:
1247         logging.warning("The input file is not defined.")
1248         return
1249     except csv.Error as err:
1250         logging.warning("Cannot process the file '{0}'.\n{1}".
1251                         format(table["input-file"], err))
1252         return
1253
1254     # Table:
1255     dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
1256
1257     # Table header:
1258     tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
1259     for idx, item in enumerate(csv_lst[0]):
1260         alignment = "left" if idx == 0 else "center"
1261         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
1262         th.text = item
1263
1264     # Rows:
1265     colors = {"regression": ("#ffcccc", "#ff9999"),
1266               "progression": ("#c6ecc6", "#9fdf9f"),
1267               "normal": ("#e9f1fb", "#d4e4f7")}
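     # Columns 4 and 5 of the dashboard CSV hold the number of regressions and
     # progressions; a non-zero value selects the row colour, and the two
     # shades of each colour alternate between odd and even rows.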
1268     for r_idx, row in enumerate(csv_lst[1:]):
1269         if int(row[4]):
1270             color = "regression"
1271         elif int(row[5]):
1272             color = "progression"
1273         else:
1274             color = "normal"
1275         background = colors[color][r_idx % 2]
1276         tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
1277
1278         # Columns:
1279         for c_idx, item in enumerate(row):
1280             alignment = "left" if c_idx == 0 else "center"
1281             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
1282             # Name:
1283             if c_idx == 0:
1284                 url = _generate_url("../trending/", testbed, item)
1285                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
1286                 ref.text = item
1287             else:
1288                 td.text = item
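     # The table is written as an rST ".. raw:: html" directive so it can be
     # included directly in the generated report.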
1289     try:
1290         with open(table["output-file"], 'w') as html_file:
1291             logging.info("    Writing file: '{0}'".format(table["output-file"]))
1292             html_file.write(".. raw:: html\n\n\t")
1293             html_file.write(ET.tostring(dashboard))
1294             html_file.write("\n\t<p><br><br></p>\n")
1295     except KeyError:
1296         logging.warning("The output file is not defined.")
1297         return
1298
1299
1300 def table_last_failed_tests(table, input_data):
1301     """Generate the table(s) with algorithm: table_last_failed_tests
1302     specified in the specification file.
1303
1304     :param table: Table to generate.
1305     :param input_data: Data to process.
1306     :type table: pandas.Series
1307     :type input_data: InputData
1308     """
1309
1310     logging.info("  Generating the table {0} ...".
1311                  format(table.get("title", "")))
1312
1313     # Transform the data
1314     logging.info("    Creating the data set for the {0} '{1}'.".
1315                  format(table.get("type", ""), table.get("title", "")))
1316     data = input_data.filter_data(table, continue_on_error=True)
1317
1318     if data is None or data.empty:
1319         logging.warning("    No data for the {0} '{1}'.".
1320                         format(table.get("type", ""), table.get("title", "")))
1321         return
1322
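     # Flat list of output lines: for each build, the build number and the
     # version from the build metadata, followed by one "<nic>-<test name>"
     # line per failed test.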
1323     tbl_list = list()
1324     for job, builds in table["data"].items():
1325         for build in builds:
1326             build = str(build)
1327             try:
1328                 version = input_data.metadata(job, build).get("version", "")
1329             except KeyError:
1330                 logging.error("Data for {job}: {build} is not present.".
1331                               format(job=job, build=build))
1332                 return
1333             tbl_list.append(build)
1334             tbl_list.append(version)
1335             for tst_name, tst_data in data[job][build].iteritems():
1336                 if tst_data["status"] != "FAIL":
1337                     continue
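                 # Extract the NIC model from the parent suite name (e.g.
                 # "10ge2p1x520"); tests without a recognisable NIC are skipped.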
1338                 groups = re.search(REGEX_NIC, tst_data["parent"])
1339                 if not groups:
1340                     continue
1341                 nic = groups.group(0)
1342                 tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))
1343
1344     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1345     logging.info("    Writing file: '{0}'".format(file_name))
1346     with open(file_name, "w") as file_handler:
1347         for test in tbl_list:
1348             file_handler.write(test + '\n')
1349
1350
1351 def table_failed_tests(table, input_data):
1352     """Generate the table(s) with algorithm: table_failed_tests
1353     specified in the specification file.
1354
1355     :param table: Table to generate.
1356     :param input_data: Data to process.
1357     :type table: pandas.Series
1358     :type input_data: InputData
1359     """
1360
1361     logging.info("  Generating the table {0} ...".
1362                  format(table.get("title", "")))
1363
1364     # Transform the data
1365     logging.info("    Creating the data set for the {0} '{1}'.".
1366                  format(table.get("type", ""), table.get("title", "")))
1367     data = input_data.filter_data(table, continue_on_error=True)
1368
1369     # Prepare the header of the tables
1370     header = ["Test Case",
1371               "Failures [#]",
1372               "Last Failure [Time]",
1373               "Last Failure [VPP-Build-Id]",
1374               "Last Failure [CSIT-Job-Build-Id]"]
1375
1376     # Generate the data for the table according to the model in the table
1377     # specification
1378
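     # Only builds generated within the last "window" days (7 by default) are
     # taken into account.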
1379     now = dt.utcnow()
1380     timeperiod = timedelta(days=int(table.get("window", 7)))
1381
1382     tbl_dict = dict()
1383     for job, builds in table["data"].items():
1384         for build in builds:
1385             build = str(build)
1386             for tst_name, tst_data in data[job][build].iteritems():
1387                 if tst_name.lower() in table.get("ignore-list", list()):
1388                     continue
1389                 if tbl_dict.get(tst_name, None) is None:
1390                     groups = re.search(REGEX_NIC, tst_data["parent"])
1391                     if not groups:
1392                         continue
1393                     nic = groups.group(0)
1394                     tbl_dict[tst_name] = {
1395                         "name": "{0}-{1}".format(nic, tst_data["name"]),
1396                         "data": OrderedDict()}
1397                 try:
1398                     generated = input_data.metadata(job, build).\
1399                         get("generated", "")
1400                     if not generated:
1401                         continue
1402                     then = dt.strptime(generated, "%Y%m%d %H:%M")
1403                     if (now - then) <= timeperiod:
1404                         tbl_dict[tst_name]["data"][build] = (
1405                             tst_data["status"],
1406                             generated,
1407                             input_data.metadata(job, build).get("version", ""),
1408                             build)
1409                 except (TypeError, KeyError) as err:
1410                     logging.warning("tst_name: {} - err: {}".
1411                                     format(tst_name, repr(err)))
1412
1413     max_fails = 0
1414     tbl_lst = list()
1415     for tst_data in tbl_dict.values():
1416         fails_nr = 0
1417         for val in tst_data["data"].values():
1418             if val[0] == "FAIL":
1419                 fails_nr += 1
1420                 fails_last_date = val[1]
1421                 fails_last_vpp = val[2]
1422                 fails_last_csit = val[3]
1423         if fails_nr:
1424             max_fails = max(max_fails, fails_nr)
1425             tbl_lst.append([tst_data["name"],
1426                             fails_nr,
1427                             fails_last_date,
1428                             fails_last_vpp,
1429                             "mrr-daily-build-{0}".format(fails_last_csit)])
1430
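     # Sort the rows by the date of the last failure (newest first), then group
     # them by the number of failures in descending order, so the most failing
     # and most recently failing tests come first.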
1431     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1432     tbl_sorted = list()
1433     for nrf in range(max_fails, -1, -1):
1434         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1435         tbl_sorted.extend(tbl_fails)
1436     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1437
1438     logging.info("    Writing file: '{0}'".format(file_name))
1439     with open(file_name, "w") as file_handler:
1440         file_handler.write(",".join(header) + "\n")
1441         for test in tbl_sorted:
1442             file_handler.write(",".join([str(item) for item in test]) + '\n')
1443
1444     txt_file_name = "{0}.txt".format(table["output-file"])
1445     logging.info("    Writing file: '{0}'".format(txt_file_name))
1446     convert_csv_to_pretty_txt(file_name, txt_file_name)
1447
1448
1449 def table_failed_tests_html(table, input_data):
1450     """Generate the table(s) with algorithm: table_failed_tests_html
1451     specified in the specification file.
1452
1453     :param table: Table to generate.
1454     :param input_data: Data to process.
1455     :type table: pandas.Series
1456     :type input_data: InputData
1457     """
1458
1459     testbed = table.get("testbed", None)
1460     if testbed is None:
1461         logging.error("The testbed is not defined for the table '{0}'.".
1462                       format(table.get("title", "")))
1463         return
1464
1465     logging.info("  Generating the table {0} ...".
1466                  format(table.get("title", "")))
1467
1468     try:
1469         with open(table["input-file"], 'rb') as csv_file:
1470             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1471             csv_lst = [item for item in csv_content]
1472     except KeyError:
1473         logging.warning("The input file is not defined.")
1474         return
1475     except csv.Error as err:
1476         logging.warning("Cannot process the file '{0}'.\n{1}".
1477                         format(table["input-file"], err))
1478         return
1479
1480     # Table:
1481     failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
1482
1483     # Table header:
1484     tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
1485     for idx, item in enumerate(csv_lst[0]):
1486         alignment = "left" if idx == 0 else "center"
1487         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
1488         th.text = item
1489
1490     # Rows:
1491     colors = ("#e9f1fb", "#d4e4f7")
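     # Data rows alternate between two background shades; the first column is
     # turned into a link to the corresponding trending graph.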
1492     for r_idx, row in enumerate(csv_lst[1:]):
1493         background = colors[r_idx % 2]
1494         tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
1495
1496         # Columns:
1497         for c_idx, item in enumerate(row):
1498             alignment = "left" if c_idx == 0 else "center"
1499             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
1500             # Name:
1501             if c_idx == 0:
1502                 url = _generate_url("../trending/", testbed, item)
1503                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
1504                 ref.text = item
1505             else:
1506                 td.text = item
1507     try:
1508         with open(table["output-file"], 'w') as html_file:
1509             logging.info("    Writing file: '{0}'".format(table["output-file"]))
1510             html_file.write(".. raw:: html\n\n\t")
1511             html_file.write(ET.tostring(failed_tests))
1512             html_file.write("\n\t<p><br><br></p>\n")
1513     except KeyError:
1514         logging.warning("The output file is not defined.")
1515         return