Report: Fix listing order of new vs. see footnote
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
28
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30     convert_csv_to_pretty_txt, relative_change_stdev
31
32
# Matches a NIC designation embedded in a test's parent-suite name,
# e.g. "10ge2p1x520" (digits, "ge", digit, "p", digit, model text/digits).
# Used below to extract the NIC model from test metadata.
REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
34
35
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table specification names its generating algorithm, which must
    be a function defined in this module.  Tables whose algorithm cannot
    be resolved are reported and skipped so the remaining tables are
    still generated.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        # Resolve the algorithm name explicitly instead of eval() -- no
        # code execution from spec strings, and a failure inside the
        # algorithm is no longer misreported as "not defined".
        generator = globals().get(table["algorithm"])
        if generator is None:
            logging.error("Probably algorithm '{alg}' is not defined.".
                          format(alg=table["algorithm"]))
            continue
        generator(table, data)
    logging.info("Done.")
53
54
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # CSV-quote every column title, doubling any embedded quotes.
    header = ['"{0}"'.format(str(column["title"]).replace('"', '""'))
              for column in table["columns"]]

    # The table model points at a single job and its first build.
    job = list(table["data"].keys())[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for _, suite in suites.items():
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table["columns"]:
                try:
                    col_data = str(data[job][build][test][
                        column["data"].split(" ")[1]]).replace('"', '""')
                    if column["data"].split(" ")[1] in ("conf-history",
                                                        "show-run"):
                        # Drop the first line break marker and wrap the
                        # text in preformatted-block markers.
                        col_data = col_data.replace(" |br| ", "", 1)
                        col_data = " |prein| {0} |preout| ".\
                            format(col_data[:-5])
                    row_lst.append('"{0}"'.format(col_data))
                except KeyError:
                    row_lst.append("No data")
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for row in table_lst:
                    file_handler.write(",".join(row) + "\n")

    logging.info("  Done.")
121
122
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # CSV-quote every column title, doubling any embedded quotes.
    header = ['"{0}"'.format(str(column["title"]).replace('"', '""'))
              for column in table["columns"]]

    for suite in suites.values():
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table["columns"]:
                try:
                    col_data = str(data[test][
                        column["data"].split(" ")[1]]).replace('"', '""')
                    col_data = col_data.replace("No Data",
                                                "Not Captured     ")
                    if column["data"].split(" ")[1] in ("conf-history",
                                                        "show-run"):
                        # Drop the first line break marker and wrap the
                        # text in preformatted-block markers.
                        col_data = col_data.replace(" |br| ", "", 1)
                        col_data = " |prein| {0} |preout| ".\
                            format(col_data[:-5])
                    row_lst.append('"{0}"'.format(col_data))
                except KeyError:
                    row_lst.append('"Not captured"')
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for row in table_lst:
                    file_handler.write(",".join(row) + "\n")

    logging.info("  Done.")
188
189
def _mod_tst_name(tst_name):
    """Normalize a test name for cross-release matching.

    Strips the rate-search suffixes (-ndrpdr, -ndrpdrdisc, ...) and
    collapses thread/core tags (e.g. "2t1c" -> "1c") so the same test
    can be paired between the reference and compare data sets.

    :param tst_name: Original test name.
    :type tst_name: str
    :returns: Normalized test name.
    :rtype: str
    """
    return tst_name.replace("-ndrpdrdisc", "").\
        replace("-ndrpdr", "").replace("-pdrdisc", "").\
        replace("-ndrdisc", "").replace("-pdr", "").\
        replace("-ndr", "").\
        replace("1t1c", "1c").replace("2t1c", "1c").\
        replace("2t2c", "2c").replace("4t2c", "2c").\
        replace("4t4c", "4c").replace("8t4c", "4c")


def _append_test_value(target, tst_data, include_tests):
    """Append the measured value selected by include_tests to target.

    MRR uses the average receive rate; PDR/NDR use either the plain
    throughput value (PDR/NDR test types) or the corresponding LOWER
    bound (NDRPDR test type).  Other combinations append nothing.

    :param target: List collecting the values.
    :param tst_data: Data of one test.
    :param include_tests: Which results to include: "MRR", "PDR" or "NDR".
    :type target: list
    :type tst_data: dict
    :type include_tests: str
    :raises KeyError, TypeError: When the expected data is missing or
        malformed; callers are expected to catch these.
    """
    # TODO: Re-work when NDRPDRDISC tests are not used
    if include_tests == "MRR":
        target.append(tst_data["result"]["receive-rate"].avg)
    elif include_tests == "PDR":
        if tst_data["type"] == "PDR":
            target.append(tst_data["throughput"]["value"])
        elif tst_data["type"] == "NDRPDR":
            target.append(tst_data["throughput"]["PDR"]["LOWER"])
    elif include_tests == "NDR":
        if tst_data["type"] == "NDR":
            target.append(tst_data["throughput"]["value"])
        elif tst_data["type"] == "NDRPDR":
            target.append(tst_data["throughput"]["NDR"]["LOWER"])


def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Rec Rate"
        else:
            hdr_param = "Thput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    # Initialized up front so the footnote check below cannot hit an
    # unbound name when the "reference" part lists no jobs at all.
    topo = ""
    for job, builds in table["reference"]["data"].items():
        topo = "2n-skx" if "2n-skx" in job else ""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = _mod_tst_name(tst_name)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    _append_test_value(tbl_dict[tst_name_mod]["ref-data"],
                                       tst_data, table["include-tests"])
                # KeyError added for consistency with the "compare" loop
                # below; a build with incomplete data must not abort the
                # whole table.
                except (KeyError, TypeError):
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = _mod_tst_name(tst_name)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    _append_test_value(tbl_dict[tst_name_mod]["cmp-data"],
                                       tst_data, table["include-tests"])
                except (KeyError, TypeError):
                    pass

    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        tst_name_mod = _mod_tst_name(tst_name)
                        if "across topologies" in table["title"].lower():
                            tst_name_mod = tst_name_mod.replace("2n1l-", "")
                        if tbl_dict.get(tst_name_mod, None) is None:
                            # Historical data only annotates tests already
                            # present in the reference/compare sets.
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(
                                item["title"], None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            _append_test_value(
                                tbl_dict[tst_name_mod]["history"][
                                    item["title"]],
                                tst_data, table["include-tests"])
                        except (TypeError, KeyError):
                            pass

    # Build table rows: name, [history mean/stdev ...], ref mean/stdev,
    # cmp mean/stdev, then delta or an annotation.
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend(["Not tested", "Not tested"])
            else:
                item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend(["Not tested", "Not tested"])
        # item[-2] is the compare mean, item[-4] the reference mean.
        if item[-2] == "Not tested":
            pass
        elif item[-4] == "Not tested":
            item.append("New in CSIT-1908")
        elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
            item.append("See footnote [1]")
            footnote = True
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if (len(item) == len(header)) and (item[-3] != "Not tested"):
            tbl_lst.append(item)

    # Sort the table:
    # 1. New in CSIT-XXXX
    # 2. See footnote
    # 3. Delta
    tbl_new = list()
    tbl_see = list()
    tbl_delta = list()
    for item in tbl_lst:
        # item[-1] is an int delta for regular rows; guard before the
        # substring tests, which would raise TypeError on an int.
        if isinstance(item[-1], str) and "New in CSIT" in item[-1]:
            tbl_new.append(item)
        elif isinstance(item[-1], str) and "See footnote" in item[-1]:
            tbl_see.append(item)
        else:
            tbl_delta.append(item)

    # Sort the tables:
    tbl_new.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)

    # Put the tables together:
    tbl_lst = list()
    tbl_lst.extend(tbl_new)
    tbl_lst.extend(tbl_see)
    tbl_lst.extend(tbl_delta)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    txt_file_name = "{0}.txt".format(table["output-file"])
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    if footnote:
        with open(txt_file_name, 'a') as txt_file:
            txt_file.writelines([
                "\nFootnotes:\n",
                "[1] CSIT-1908 changed test methodology of dot1q tests in "
                "2-node testbeds, dot1q encapsulation is now used on both "
                "links of SUT.\n",
                "    Previously dot1q was used only on a single link with the "
                "other link carrying untagged Ethernet frames. This changes "
                "results\n",
                "    in slightly lower throughput in CSIT-1908 for these "
                "tests. See release notes."
            ])
476
477
478 def table_performance_comparison_nic(table, input_data):
479     """Generate the table(s) with algorithm: table_performance_comparison
480     specified in the specification file.
481
482     :param table: Table to generate.
483     :param input_data: Data to process.
484     :type table: pandas.Series
485     :type input_data: InputData
486     """
487
488     logging.info("  Generating the table {0} ...".
489                  format(table.get("title", "")))
490
491     # Transform the data
492     logging.info("    Creating the data set for the {0} '{1}'.".
493                  format(table.get("type", ""), table.get("title", "")))
494     data = input_data.filter_data(table, continue_on_error=True)
495
496     # Prepare the header of the tables
497     try:
498         header = ["Test case", ]
499
500         if table["include-tests"] == "MRR":
501             hdr_param = "Rec Rate"
502         else:
503             hdr_param = "Thput"
504
505         history = table.get("history", None)
506         if history:
507             for item in history:
508                 header.extend(
509                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
510                      "{0} Stdev [Mpps]".format(item["title"])])
511         header.extend(
512             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
513              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
514              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
515              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
516              "Delta [%]"])
517         header_str = ",".join(header) + "\n"
518     except (AttributeError, KeyError) as err:
519         logging.error("The model is invalid, missing parameter: {0}".
520                       format(err))
521         return
522
523     # Prepare data to the table:
524     tbl_dict = dict()
525     for job, builds in table["reference"]["data"].items():
526         topo = "2n-skx" if "2n-skx" in job else ""
527         for build in builds:
528             for tst_name, tst_data in data[job][str(build)].iteritems():
529                 if table["reference"]["nic"] not in tst_data["tags"]:
530                     continue
531                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
532                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
533                     replace("-ndrdisc", "").replace("-pdr", "").\
534                     replace("-ndr", "").\
535                     replace("1t1c", "1c").replace("2t1c", "1c").\
536                     replace("2t2c", "2c").replace("4t2c", "2c").\
537                     replace("4t4c", "4c").replace("8t4c", "4c")
538                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
539                 if "across topologies" in table["title"].lower():
540                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
541                 if tbl_dict.get(tst_name_mod, None) is None:
542                     name = "{0}".format("-".join(tst_data["name"].
543                                                  split("-")[:-1]))
544                     if "across testbeds" in table["title"].lower() or \
545                             "across topologies" in table["title"].lower():
546                         name = name.\
547                             replace("1t1c", "1c").replace("2t1c", "1c").\
548                             replace("2t2c", "2c").replace("4t2c", "2c").\
549                             replace("4t4c", "4c").replace("8t4c", "4c")
550                     tbl_dict[tst_name_mod] = {"name": name,
551                                               "ref-data": list(),
552                                               "cmp-data": list()}
553                 try:
554                     # TODO: Re-work when NDRPDRDISC tests are not used
555                     if table["include-tests"] == "MRR":
556                         tbl_dict[tst_name_mod]["ref-data"]. \
557                             append(tst_data["result"]["receive-rate"].avg)
558                     elif table["include-tests"] == "PDR":
559                         if tst_data["type"] == "PDR":
560                             tbl_dict[tst_name_mod]["ref-data"]. \
561                                 append(tst_data["throughput"]["value"])
562                         elif tst_data["type"] == "NDRPDR":
563                             tbl_dict[tst_name_mod]["ref-data"].append(
564                                 tst_data["throughput"]["PDR"]["LOWER"])
565                     elif table["include-tests"] == "NDR":
566                         if tst_data["type"] == "NDR":
567                             tbl_dict[tst_name_mod]["ref-data"]. \
568                                 append(tst_data["throughput"]["value"])
569                         elif tst_data["type"] == "NDRPDR":
570                             tbl_dict[tst_name_mod]["ref-data"].append(
571                                 tst_data["throughput"]["NDR"]["LOWER"])
572                     else:
573                         continue
574                 except TypeError:
575                     pass  # No data in output.xml for this test
576
577     for job, builds in table["compare"]["data"].items():
578         for build in builds:
579             for tst_name, tst_data in data[job][str(build)].iteritems():
580                 if table["compare"]["nic"] not in tst_data["tags"]:
581                     continue
582                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
583                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
584                     replace("-ndrdisc", "").replace("-pdr", ""). \
585                     replace("-ndr", "").\
586                     replace("1t1c", "1c").replace("2t1c", "1c").\
587                     replace("2t2c", "2c").replace("4t2c", "2c").\
588                     replace("4t4c", "4c").replace("8t4c", "4c")
589                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
590                 if "across topologies" in table["title"].lower():
591                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
592                 if tbl_dict.get(tst_name_mod, None) is None:
593                     name = "{0}".format("-".join(tst_data["name"].
594                                                  split("-")[:-1]))
595                     if "across testbeds" in table["title"].lower() or \
596                             "across topologies" in table["title"].lower():
597                         name = name.\
598                             replace("1t1c", "1c").replace("2t1c", "1c").\
599                             replace("2t2c", "2c").replace("4t2c", "2c").\
600                             replace("4t4c", "4c").replace("8t4c", "4c")
601                     tbl_dict[tst_name_mod] = {"name": name,
602                                               "ref-data": list(),
603                                               "cmp-data": list()}
604                 try:
605                     # TODO: Re-work when NDRPDRDISC tests are not used
606                     if table["include-tests"] == "MRR":
607                         tbl_dict[tst_name_mod]["cmp-data"]. \
608                             append(tst_data["result"]["receive-rate"].avg)
609                     elif table["include-tests"] == "PDR":
610                         if tst_data["type"] == "PDR":
611                             tbl_dict[tst_name_mod]["cmp-data"]. \
612                                 append(tst_data["throughput"]["value"])
613                         elif tst_data["type"] == "NDRPDR":
614                             tbl_dict[tst_name_mod]["cmp-data"].append(
615                                 tst_data["throughput"]["PDR"]["LOWER"])
616                     elif table["include-tests"] == "NDR":
617                         if tst_data["type"] == "NDR":
618                             tbl_dict[tst_name_mod]["cmp-data"]. \
619                                 append(tst_data["throughput"]["value"])
620                         elif tst_data["type"] == "NDRPDR":
621                             tbl_dict[tst_name_mod]["cmp-data"].append(
622                                 tst_data["throughput"]["NDR"]["LOWER"])
623                     else:
624                         continue
625                 except (KeyError, TypeError):
626                     pass
627
628     if history:
629         for item in history:
630             for job, builds in item["data"].items():
631                 for build in builds:
632                     for tst_name, tst_data in data[job][str(build)].iteritems():
633                         if item["nic"] not in tst_data["tags"]:
634                             continue
635                         tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
636                             replace("-ndrpdr", "").replace("-pdrdisc", ""). \
637                             replace("-ndrdisc", "").replace("-pdr", ""). \
638                             replace("-ndr", "").\
639                             replace("1t1c", "1c").replace("2t1c", "1c").\
640                             replace("2t2c", "2c").replace("4t2c", "2c").\
641                             replace("4t4c", "4c").replace("8t4c", "4c")
642                         tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
643                         if "across topologies" in table["title"].lower():
644                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
645                         if tbl_dict.get(tst_name_mod, None) is None:
646                             continue
647                         if tbl_dict[tst_name_mod].get("history", None) is None:
648                             tbl_dict[tst_name_mod]["history"] = OrderedDict()
649                         if tbl_dict[tst_name_mod]["history"].get(item["title"],
650                                                              None) is None:
651                             tbl_dict[tst_name_mod]["history"][item["title"]] = \
652                                 list()
653                         try:
654                             # TODO: Re-work when NDRPDRDISC tests are not used
655                             if table["include-tests"] == "MRR":
656                                 tbl_dict[tst_name_mod]["history"][item["title"
657                                 ]].append(tst_data["result"]["receive-rate"].
658                                           avg)
659                             elif table["include-tests"] == "PDR":
660                                 if tst_data["type"] == "PDR":
661                                     tbl_dict[tst_name_mod]["history"][
662                                         item["title"]].\
663                                         append(tst_data["throughput"]["value"])
664                                 elif tst_data["type"] == "NDRPDR":
665                                     tbl_dict[tst_name_mod]["history"][item[
666                                         "title"]].append(tst_data["throughput"][
667                                         "PDR"]["LOWER"])
668                             elif table["include-tests"] == "NDR":
669                                 if tst_data["type"] == "NDR":
670                                     tbl_dict[tst_name_mod]["history"][
671                                         item["title"]].\
672                                         append(tst_data["throughput"]["value"])
673                                 elif tst_data["type"] == "NDRPDR":
674                                     tbl_dict[tst_name_mod]["history"][item[
675                                         "title"]].append(tst_data["throughput"][
676                                         "NDR"]["LOWER"])
677                             else:
678                                 continue
679                         except (TypeError, KeyError):
680                             pass
681
682     tbl_lst = list()
683     footnote = False
684     for tst_name in tbl_dict.keys():
685         item = [tbl_dict[tst_name]["name"], ]
686         if history:
687             if tbl_dict[tst_name].get("history", None) is not None:
688                 for hist_data in tbl_dict[tst_name]["history"].values():
689                     if hist_data:
690                         item.append(round(mean(hist_data) / 1000000, 2))
691                         item.append(round(stdev(hist_data) / 1000000, 2))
692                     else:
693                         item.extend(["Not tested", "Not tested"])
694             else:
695                 item.extend(["Not tested", "Not tested"])
696         data_t = tbl_dict[tst_name]["ref-data"]
697         if data_t:
698             item.append(round(mean(data_t) / 1000000, 2))
699             item.append(round(stdev(data_t) / 1000000, 2))
700         else:
701             item.extend(["Not tested", "Not tested"])
702         data_t = tbl_dict[tst_name]["cmp-data"]
703         if data_t:
704             item.append(round(mean(data_t) / 1000000, 2))
705             item.append(round(stdev(data_t) / 1000000, 2))
706         else:
707             item.extend(["Not tested", "Not tested"])
708         if item[-2] == "Not tested":
709             pass
710         elif item[-4] == "Not tested":
711             item.append("New in CSIT-1908")
712         elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
713             item.append("See footnote [1]")
714             footnote = True
715         elif item[-4] != 0:
716             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
717         if (len(item) == len(header)) and (item[-3] != "Not tested"):
718             tbl_lst.append(item)
719
720     # Sort the table:
721     # 1. New in CSIT-XXXX
722     # 2. See footnote
723     # 3. Delta
724     tbl_new = list()
725     tbl_see = list()
726     tbl_delta = list()
727     for item in tbl_lst:
728         if "New in CSIT" in item[-1]:
729             tbl_new.append(item)
730         elif "See footnote" in item[-1]:
731             tbl_see.append(item)
732         else:
733             tbl_delta.append(item)
734
735     # Sort the tables:
736     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
737     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
738     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
739     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
740
741     # Put the tables together:
742     tbl_lst = list()
743     tbl_lst.extend(tbl_new)
744     tbl_lst.extend(tbl_see)
745     tbl_lst.extend(tbl_delta)
746
747     # Generate csv tables:
748     csv_file = "{0}.csv".format(table["output-file"])
749     with open(csv_file, "w") as file_handler:
750         file_handler.write(header_str)
751         for test in tbl_lst:
752             file_handler.write(",".join([str(item) for item in test]) + "\n")
753
754     txt_file_name = "{0}.txt".format(table["output-file"])
755     convert_csv_to_pretty_txt(csv_file, txt_file_name)
756
757     if footnote:
758         with open(txt_file_name, 'a') as txt_file:
759             txt_file.writelines([
760                 "\nFootnotes:\n",
761                 "[1] CSIT-1908 changed test methodology of dot1q tests in "
762                 "2-node testbeds, dot1q encapsulation is now used on both "
763                 "links of SUT.\n",
764                 "    Previously dot1q was used only on a single link with the "
765                 "other link carrying untagged Ethernet frames. This changes "
766                 "results\n",
767                 "    in slightly lower throughput in CSIT-1908 for these "
768                 "tests. See release notes."
769             ])
770
771
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Compare results of the same tests run on two different NICs
    (table["reference"]["nic"] vs table["compare"]["nic"]) and write the
    comparison, including the relative change [%], to csv and
    pretty-printed txt files.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        # MRR tests report a receive rate, NDR/PDR tests a throughput.
        if table["include-tests"] == "MRR":
            hdr_param = "Rec Rate"
        else:
            hdr_param = "Thput"

        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Normalize the test name: drop the test-type suffix and
                # collapse the thread/core tags (e.g. "2t1c" -> "1c") ...
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                # ... and strip the NIC part so results measured on both
                # NICs map to the same dictionary key.
                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name: the test name without its last
                    # (test-type) part.
                    name = "-".join(tst_data["name"].split("-")[:-1])
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    if table["include-tests"] == "MRR":
                        result = tst_data["result"]["receive-rate"].avg
                    elif table["include-tests"] == "PDR":
                        result = tst_data["throughput"]["PDR"]["LOWER"]
                    elif table["include-tests"] == "NDR":
                        result = tst_data["throughput"]["NDR"]["LOWER"]
                    else:
                        result = None

                    # The NIC tag decides whether the sample belongs to the
                    # reference or to the compare column.
                    if result:
                        if table["reference"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["ref-data"].append(result)
                        elif table["compare"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug("No data for {0}".format(tst_name))
                    logging.debug(repr(err))
                    # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            # Results are in pps; report Mpps rounded to two decimals.
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # The relative change can only be computed when both means exist
        # and the reference mean is non-zero.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only complete rows, i.e. rows where the delta was appended.
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
880
881
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Pair each SOAK test with its corresponding NDR/PDR (or MRR) test and
    write the comparison, including the relative change and its stdev,
    to csv and pretty-printed txt files.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Build the table header; an invalid model aborts this table.
    try:
        header = [
            "Test case",
            "{0} Thput [Mpps]".format(table["reference"]["title"]),
            "{0} Stdev [Mpps]".format(table["reference"]["title"]),
            "{0} Thput [Mpps]".format(table["compare"]["title"]),
            "{0} Stdev [Mpps]".format(table["compare"]["title"]),
            "Delta [%]", "Stdev of delta [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Collect the results of all available SOAK tests:
    results = dict()
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_data["type"] != "SOAK":
                    continue
                tst_name_mod = tst_name.replace("-soak", "")
                if tst_name_mod not in results:
                    # Displayed name: NIC (from the parent suite) plus the
                    # test name without its last (test-type) part.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    results[tst_name_mod] = {
                        "name": name,
                        "ref-data": list(),
                        "cmp-data": list()
                    }
                try:
                    results[tst_name_mod]["cmp-data"].append(
                        tst_data["throughput"]["LOWER"])
                except (KeyError, TypeError):
                    pass

    # Add the NDR test results corresponding to the collected SOAK tests:
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdr", "").\
                    replace("-mrr", "")
                if tst_name_mod not in results:
                    continue
                try:
                    if tst_data["type"] not in ("NDRPDR", "MRR", "BMRR"):
                        continue
                    if table["include-tests"] == "MRR":
                        result = tst_data["result"]["receive-rate"].avg
                    elif table["include-tests"] == "PDR":
                        result = tst_data["throughput"]["PDR"]["LOWER"]
                    elif table["include-tests"] == "NDR":
                        result = tst_data["throughput"]["NDR"]["LOWER"]
                    else:
                        result = None
                    if result is not None:
                        results[tst_name_mod]["ref-data"].append(result)
                except (KeyError, TypeError):
                    continue

    rows = list()
    for tst_name in results:
        row = [results[tst_name]["name"], ]
        ref_samples = results[tst_name]["ref-data"]
        if ref_samples:
            # Results are in pps; report Mpps rounded to two decimals.
            ref_mean = mean(ref_samples)
            row.append(round(ref_mean / 1000000, 2))
            ref_stdev = stdev(ref_samples)
            row.append(round(ref_stdev / 1000000, 2))
        else:
            ref_mean = None
            ref_stdev = None
            row.extend([None, None])
        cmp_samples = results[tst_name]["cmp-data"]
        if cmp_samples:
            cmp_mean = mean(cmp_samples)
            row.append(round(cmp_mean / 1000000, 2))
            cmp_stdev = stdev(cmp_samples)
            row.append(round(cmp_stdev / 1000000, 2))
        else:
            cmp_mean = None
            cmp_stdev = None
            row.extend([None, None])
        # Only rows with both a reference and a compare mean get a delta
        # and make it into the table.
        if ref_mean and cmp_mean:
            delta, d_stdev = relative_change_stdev(
                ref_mean, cmp_mean, ref_stdev, cmp_stdev)
            row.append(round(delta, 2))
            row.append(round(d_stdev, 2))
            rows.append(row)

    # Sort the table according to the relative change
    rows.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in rows:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
1003
1004
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    For every test the table lists the last trend value, the short-term and
    long-term relative changes of the trend, and the number of regressions
    and progressions detected within the evaluation window.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # The NIC is extracted from the parent suite name; tests
                    # without a recognizable NIC are skipped.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name]["data"]
        # At least two samples are needed to evaluate a trend.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        # Maximum of the trend within the long-term window (excluding the
        # short-term window); nan when there is no valid sample there.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip tests with incomplete trending information. This single
            # check also covers the previously separate (and redundant)
            # "both relative changes are nan" condition.
            if (isnan(last_avg) or
                    isnan(rel_change_last) or
                    isnan(rel_change_long)):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Sort the table rows: primarily by the number of regressions
    # (descending), then by the number of progressions (descending), and
    # within each group by the short-term change (ascending).
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1123
1124
def _generate_url(base, testbed, test_name):
    """Generate URL to a trending plot from the name of the test case.

    :param base: The base part of URL common to all test cases.
    :param testbed: The testbed used for testing.
    :param test_name: The name of the test case.
    :type base: str
    :type testbed: str
    :type test_name: str
    :returns: The URL to the plot with the trending data for the given test
        case.
    :rtype: str
    """

    file_name = ""
    feature = ""

    # Map the test name onto the plot file name and, where applicable, the
    # feature part of the plot title. The order of the checks matters:
    # more specific patterns must be tested first.
    if "lbdpdk" in test_name or "lbvpp" in test_name:
        file_name = "link_bonding"

    elif "114b" in test_name and "vhost" in test_name:
        file_name = "vts"

    elif "testpmd" in test_name or "l3fwd" in test_name:
        file_name = "dpdk"

    elif "memif" in test_name:
        file_name = "container_memif"
        feature = "-base"

    elif "srv6" in test_name:
        file_name = "srv6"

    elif "vhost" in test_name:
        if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
            file_name = "vm_vhost_l2"
            if "114b" in test_name:
                feature = ""
            elif "l2xcbase" in test_name and "x520" in test_name:
                feature = "-base-l2xc"
            elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
                feature = "-base-l2bd"
            else:
                feature = "-base"
        elif "ip4base" in test_name:
            file_name = "vm_vhost_ip4"
            feature = "-base"

    elif "ipsecbasetnlsw" in test_name:
        file_name = "ipsecsw"
        feature = "-base-scale"

    elif "ipsec" in test_name:
        file_name = "ipsec"
        feature = "-base-scale"
        if "hw-" in test_name:
            file_name = "ipsechw"
        elif "sw-" in test_name:
            file_name = "ipsecsw"

    elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
        file_name = "ip4_tunnels"
        feature = "-base"

    elif "ip4base" in test_name or "ip4scale" in test_name:
        file_name = "ip4"
        if "xl710" in test_name:
            feature = "-base-scale-features"
        elif "iacl" in test_name:
            feature = "-features-iacl"
        elif "oacl" in test_name:
            feature = "-features-oacl"
        elif "snat" in test_name or "cop" in test_name:
            feature = "-features"
        else:
            feature = "-base-scale"

    elif "ip6base" in test_name or "ip6scale" in test_name:
        file_name = "ip6"
        feature = "-base-scale"

    elif "l2xcbase" in test_name or "l2xcscale" in test_name \
            or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
            or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
        file_name = "l2"
        if "macip" in test_name:
            feature = "-features-macip"
        elif "iacl" in test_name:
            feature = "-features-iacl"
        elif "oacl" in test_name:
            feature = "-features-oacl"
        else:
            feature = "-base-scale"

    # NIC tag, with a trailing dash; empty string if no known NIC is
    # present in the test name. First match wins.
    nic = ""
    for nic_tag in ("x520", "x710", "xl710", "xxv710", "vic1227", "vic1385"):
        if nic_tag in test_name:
            nic = nic_tag + "-"
            break

    # Frame size tag; empty string if no known frame size is present.
    framesize = ""
    for fsize_tag in ("64b", "78b", "imix", "9000b", "1518b", "114b"):
        if fsize_tag in test_name:
            framesize = fsize_tag
            break

    # Threads/cores tag; empty string if no known tag is present.
    cores = ""
    for core_tag in ("1t1c", "2t2c", "4t4c", "2t1c", "4t2c", "8t4c"):
        if core_tag in test_name:
            cores = core_tag
            break

    anchor = ".html#" + nic + framesize + '-' + cores

    return base + file_name + '-' + testbed + '-' + nic + framesize + \
        feature.replace("-int", "").replace("-tnl", "") + anchor + feature
1268
1269
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Read the csv table produced by table_performance_trending_dashboard and
    render it as an html table with color-coded rows and links to the
    trending plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = list(csv_content)
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except IOError as err:
        # A missing or unreadable input file must not crash the whole
        # report generation; skip just this table.
        logging.warning("Not possible to open the file '{0}'.\n{1}".
                        format(table.get("input-file", ""), err))
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header (the first csv row):
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows: the color marks a regression (column 4) or progression
    # (column 5); two shades alternate between even and odd rows.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        else:
            color = "normal"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # The first column (test name) links to the trending plot:
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
1346
1347
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each specified build write the build number, the tested version and
    the names of all failed tests to the output file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        # logging.warn() is a deprecated alias; use logging.warning().
        logging.warning("    No data for the {0} '{1}'.".
                        format(table.get("type", ""), table.get("title", "")))
        return

    tbl_list = list()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get("version", "")
            except KeyError:
                logging.error("Data for {job}: {build} is not present.".
                              format(job=job, build=build))
                return
            tbl_list.append(build)
            tbl_list.append(version)
            # List the names of all failed tests, prefixed with the NIC
            # extracted from the parent suite name; tests without a
            # recognizable NIC are skipped.
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_data["status"] != "FAIL":
                    continue
                groups = re.search(REGEX_NIC, tst_data["parent"])
                if not groups:
                    continue
                nic = groups.group(0)
                tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        for test in tbl_list:
            file_handler.write(test + '\n')
1397
1398
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Produces a CSV (and a pretty-printed TXT copy) of tests which failed
    within the configured time window, sorted by the number of failures
    (descending) and, within equal counts, by the date of the last failure
    (newest first).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification

    now = dt.utcnow()
    # Only builds generated within this window are counted
    # (default: the last 7 days).
    timeperiod = timedelta(int(table.get("window", 7)))

    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Skip tests whose parent suite does not encode a NIC.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    generated = input_data.metadata(job, build).\
                        get("generated", "")
                    if not generated:
                        continue
                    then = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name]["data"][build] = (
                            tst_data["status"],
                            generated,
                            input_data.metadata(job, build).get("version", ""),
                            build)
                except (TypeError, KeyError) as err:
                    logging.warning("tst_name: {} - err: {}".
                                    format(tst_name, repr(err)))

    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        # Defensive init; these are read only when fails_nr > 0 below, in
        # which case the loop has assigned them at least once.
        fails_last_date = fails_last_vpp = fails_last_csit = ""
        for val in tst_data["data"].values():
            if val[0] == "FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    # Sort by the date of the last failure (newest first), then stably by
    # the number of failures (highest first). Python's sort is stable, so
    # this pair of sorts is equivalent to the original per-count bucket
    # pass over range(max_fails, -1, -1), but runs in O(n log n) instead
    # of O(max_fails * n).
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_lst.sort(key=lambda rel: rel[1], reverse=True)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1495
1496
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV produced for the failed-tests table and renders it as an
    HTML table embedded in a reST ".. raw:: html" block. The first column
    of every data row is turned into a link to the trending pages.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read all rows of the pre-generated CSV input.
    try:
        with open(table["input-file"], 'rb') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row (first CSV row):
    hdr_row = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for col_nr, caption in enumerate(rows[0]):
        cell = ET.SubElement(
            hdr_row, "th",
            attrib=dict(align="left" if col_nr == 0 else "center"))
        cell.text = caption

    # Data rows with alternating background colours:
    colors = ("#e9f1fb", "#d4e4f7")
    for row_nr, row in enumerate(rows[1:]):
        data_row = ET.SubElement(
            failed_tests, "tr", attrib=dict(bgcolor=colors[row_nr % 2]))
        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                data_row, "td",
                attrib=dict(align="left" if col_nr == 0 else "center"))
            if col_nr == 0:
                # The first column is the test name - link it to trending.
                url = _generate_url("../trending/", testbed, value)
                link = ET.SubElement(cell, "a", attrib=dict(href=url))
                link.text = value
            else:
                cell.text = value

    # Write the table wrapped in a reST raw-html directive.
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return