0646db3ab828a063e0eb1daa51a98d2906ee801f
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
28
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30     convert_csv_to_pretty_txt
31
32
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
34
35
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table specification names its generator algorithm (e.g.
    "table_details"); the matching module-level function is looked up and
    called with (table, data).

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        # Resolve the generator by name instead of eval() so that an
        # arbitrary expression in the specification file cannot be executed.
        algorithm = globals().get(table["algorithm"])
        if algorithm is None:
            logging.error("Probably algorithm '{alg}' is not defined.".
                          format(alg=table["algorithm"]))
            continue
        try:
            algorithm(table, data)
        except NameError as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
    logging.info("Done.")
53
54
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # CSV header: every column title quoted, inner double quotes escaped.
    header = ['"{0}"'.format(str(col["title"]).replace('"', '""'))
              for col in table["columns"]]

    # Only the first job and its first build from the specification are used.
    job = list(table["data"].keys())[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.items():
        # Generate data
        suite_name = suite["name"]
        rows = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] not in suite_name:
                continue
            row = list()
            for column in table["columns"]:
                try:
                    data_key = column["data"].split(" ")[1]
                    cell = str(data[job][build][test][data_key]).\
                        replace('"', '""')
                    if data_key in ("conf-history", "show-run"):
                        # Drop the first line-break marker and wrap the
                        # text in preformatted-block markers.
                        cell = cell.replace(" |br| ", "", 1)
                        cell = " |prein| {0} |preout| ".format(cell[:-5])
                    row.append('"{0}"'.format(cell))
                except KeyError:
                    row.append("No data")
            rows.append(row)

        # Write the data to file
        if rows:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for row in rows:
                    file_handler.write(",".join(row) + "\n")

    logging.info("  Done.")
121
122
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # CSV header: every column title quoted, inner double quotes escaped.
    header = ['"{0}"'.format(str(col["title"]).replace('"', '""'))
              for col in table["columns"]]

    for _, suite in suites.items():
        # Generate data
        suite_name = suite["name"]
        rows = list()
        for test in data.keys():
            if data[test]["parent"] not in suite_name:
                continue
            row = list()
            for column in table["columns"]:
                try:
                    data_key = column["data"].split(" ")[1]
                    cell = str(data[test][data_key]).replace('"', '""')
                    cell = cell.replace("No Data", "Not Captured     ")
                    if data_key in ("conf-history", "show-run"):
                        # Drop the first line-break marker and wrap the
                        # text in preformatted-block markers.
                        cell = cell.replace(" |br| ", "", 1)
                        cell = " |prein| {0} |preout| ".format(cell[:-5])
                    row.append('"{0}"'.format(cell))
                except KeyError:
                    row.append('"Not captured"')
            rows.append(row)

        # Write the data to file
        if rows:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for row in rows:
                    file_handler.write(",".join(row) + "\n")

    logging.info("  Done.")
188
189
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Collects per-test samples from the "reference" and "compare" build sets
    (and, optionally, older "history" sets), then writes a CSV and a pretty
    txt table with mean, stdev and relative delta per test case.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables:
    # "Test case", a mean/stdev column pair per optional history item,
    # mean/stdev pairs for reference and compare, then "Delta [%]".
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    # tbl_dict maps a normalized test name (test-type suffix removed,
    # thread/core tags collapsed, e.g. "1t1c" -> "1c") to the display name
    # and the collected reference/compare samples.
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Display name: NIC model (extracted from the parent
                    # suite name) plus the test name minus its last segment.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["ref-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except TypeError:
                    pass  # No data in output.xml for this test

    # Collect the compare samples the same way.  Note the asymmetric error
    # policy: KeyError is ignored, while TypeError removes the whole test
    # case from the table.
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                    replace("-ndrdisc", "").replace("-pdr", ""). \
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["cmp-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except KeyError:
                    pass
                except TypeError:
                    tbl_dict.pop(tst_name_mod, None)
    # Optionally collect samples from older ("history") build sets; only
    # tests already present in tbl_dict are extended.
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                            replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                            replace("-ndrdisc", "").replace("-pdr", ""). \
                            replace("-ndr", "").\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                        if "across topologies" in table["title"].lower():
                            tst_name_mod = tst_name_mod.replace("2n1l-", "")
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][item["title"
                                ]].append(tst_data["result"]["receive-rate"].
                                          avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "PDR"]["LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "NDR"]["LOWER"])
                            else:
                                continue
                        except (TypeError, KeyError):
                            pass

    # Assemble the rows: name, [history mean/stdev pairs], ref mean/stdev,
    # cmp mean/stdev, delta.  item[-4]/item[-2] are the ref and cmp means.
    # Rows lacking a value get None placeholders; the final length check
    # drops any row that does not match the header exactly (e.g. when the
    # delta could not be computed).
    # NOTE(review): when 'history' holds more than one item but a test has
    # no history data at all, only one [None, None] pair is added, so that
    # row is silently dropped by the length check -- confirm intended.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
416
417
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        ref_title = table["reference"]["title"]
        cmp_title = table["compare"]["title"]
        header = ["Test case",
                  "{0} {1} [Mpps]".format(ref_title, hdr_param),
                  "{0} Stdev [Mpps]".format(ref_title),
                  "{0} {1} [Mpps]".format(cmp_title, hdr_param),
                  "{0} Stdev [Mpps]".format(cmp_title),
                  "Delta [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table: normalize test names (the order of the
    # substitutions below matters) and bucket results into reference or
    # compare lists according to the NIC tag.
    _SUBSTITUTIONS = (("-ndrpdrdisc", ""), ("-ndrpdr", ""), ("-pdrdisc", ""),
                      ("-ndrdisc", ""), ("-pdr", ""), ("-ndr", ""),
                      ("1t1c", "1c"), ("2t1c", "1c"),
                      ("2t2c", "2c"), ("4t2c", "2c"),
                      ("4t4c", "4c"), ("8t4c", "4c"))
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name
                for old, new in _SUBSTITUTIONS:
                    tst_name_mod = tst_name_mod.replace(old, new)
                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = "-".join(tst_data["name"].split("-")[:-1])
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    if table["include-tests"] == "MRR":
                        result = tst_data["result"]["receive-rate"].avg
                    elif table["include-tests"] == "PDR":
                        result = tst_data["throughput"]["PDR"]["LOWER"]
                    elif table["include-tests"] == "NDR":
                        result = tst_data["throughput"]["NDR"]["LOWER"]
                    else:
                        result = None

                    if result:
                        if table["reference"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["ref-data"].append(result)
                        elif table["compare"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug("No data for {0}".format(tst_name))
                    logging.debug(repr(err))
                    # No data in output.xml for this test

    # Row layout: name, ref mean, ref stdev, cmp mean, cmp stdev, delta.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        for samples in (tbl_dict[tst_name]["ref-data"],
                        tbl_dict[tst_name]["cmp-data"]):
            if samples:
                item.append(round(mean(samples) / 1000000, 2))
                item.append(round(stdev(samples) / 1000000, 2))
            else:
                item.extend([None, None])
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
526
527
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    For each (non-ignored) MRR test case the trend, short- and long-term
    relative changes and the counts of detected regressions/progressions
    are computed and written as CSV plus a pretty txt table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Collect the receive rates per test case, keyed by build and kept in
    # the build order given by the specification.
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # The display name is prefixed with the NIC model taken
                    # from the parent suite name; tests without a NIC match
                    # are skipped.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        if len(data_t) < 2:
            continue  # At least two samples are needed for a trend.

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        # Best average within the long-term window that precedes the
        # short-term window; NaN when there is no valid sample there.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip test cases with incomplete data.  This single check also
            # covers the former "both relative changes are NaN" test, which
            # was redundant (fully subsumed by the or-ed conditions here).
            if (isnan(last_avg) or
                    isnan(rel_change_last) or
                    isnan(rel_change_long)):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Final ordering: most regressions first, then most progressions,
    # within each group sorted by short-term change.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
646
647
648 def _generate_url(base, testbed, test_name):
649     """Generate URL to a trending plot from the name of the test case.
650
651     :param base: The base part of URL common to all test cases.
652     :param testbed: The testbed used for testing.
653     :param test_name: The name of the test case.
654     :type base: str
655     :type testbed: str
656     :type test_name: str
657     :returns: The URL to the plot with the trending data for the given test
658         case.
659     :rtype str
660     """
661
662     url = base
663     file_name = ""
664     anchor = ".html#"
665     feature = ""
666
667     if "lbdpdk" in test_name or "lbvpp" in test_name:
668         file_name = "link_bonding"
669
670     elif "114b" in test_name and "vhost" in test_name:
671         file_name = "vts"
672
673     elif "testpmd" in test_name or "l3fwd" in test_name:
674         file_name = "dpdk"
675
676     elif "memif" in test_name:
677         file_name = "container_memif"
678         feature = "-base"
679
680     elif "srv6" in test_name:
681         file_name = "srv6"
682
683     elif "vhost" in test_name:
684         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
685             file_name = "vm_vhost_l2"
686             if "114b" in test_name:
687                 feature = ""
688             elif "l2xcbase" in test_name and "x520" in test_name:
689                 feature = "-base-l2xc"
690             elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
691                 feature = "-base-l2bd"
692             else:
693                 feature = "-base"
694         elif "ip4base" in test_name:
695             file_name = "vm_vhost_ip4"
696             feature = "-base"
697
698     elif "ipsec" in test_name:
699         file_name = "ipsec"
700         feature = "-base-scale"
701
702     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
703         file_name = "ip4_tunnels"
704         feature = "-base"
705
706     elif "ip4base" in test_name or "ip4scale" in test_name:
707         file_name = "ip4"
708         if "xl710" in test_name:
709             feature = "-base-scale-features"
710         elif "iacl" in test_name:
711             feature = "-features-iacl"
712         elif "oacl" in test_name:
713             feature = "-features-oacl"
714         elif "snat" in test_name or "cop" in test_name:
715             feature = "-features"
716         else:
717             feature = "-base-scale"
718
719     elif "ip6base" in test_name or "ip6scale" in test_name:
720         file_name = "ip6"
721         feature = "-base-scale"
722
723     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
724             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
725             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
726         file_name = "l2"
727         if "macip" in test_name:
728             feature = "-features-macip"
729         elif "iacl" in test_name:
730             feature = "-features-iacl"
731         elif "oacl" in test_name:
732             feature = "-features-oacl"
733         else:
734             feature = "-base-scale"
735
736     if "x520" in test_name:
737         nic = "x520-"
738     elif "x710" in test_name:
739         nic = "x710-"
740     elif "xl710" in test_name:
741         nic = "xl710-"
742     elif "xxv710" in test_name:
743         nic = "xxv710-"
744     elif "vic1227" in test_name:
745         nic = "vic1227-"
746     elif "vic1385" in test_name:
747         nic = "vic1385-"
748     else:
749         nic = ""
750     anchor += nic
751
752     if "64b" in test_name:
753         framesize = "64b"
754     elif "78b" in test_name:
755         framesize = "78b"
756     elif "imix" in test_name:
757         framesize = "imix"
758     elif "9000b" in test_name:
759         framesize = "9000b"
760     elif "1518b" in test_name:
761         framesize = "1518b"
762     elif "114b" in test_name:
763         framesize = "114b"
764     else:
765         framesize = ""
766     anchor += framesize + '-'
767
768     if "1t1c" in test_name:
769         anchor += "1t1c"
770     elif "2t2c" in test_name:
771         anchor += "2t2c"
772     elif "4t4c" in test_name:
773         anchor += "4t4c"
774     elif "2t1c" in test_name:
775         anchor += "2t1c"
776     elif "4t2c" in test_name:
777         anchor += "4t2c"
778     elif "8t4c" in test_name:
779         anchor += "8t4c"
780
781     return url + file_name + '-' + testbed + '-' + nic + framesize + feature + \
782            anchor + feature
783
784
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the dashboard CSV produced earlier, renders it as an HTML table
    (first column linked to the trending plots) and writes it wrapped in a
    reST ``.. raw:: html`` directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # The resulting HTML table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row built from the first CSV line:
    hdr_row = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        cell = ET.SubElement(
            hdr_row, "th",
            attrib=dict(align="left" if idx == 0 else "center"))
        cell.text = item

    # Data rows; each anomaly class has a pair of alternating backgrounds:
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 carry the regression / progression counts.
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        else:
            color = "normal"
        trow = ET.SubElement(
            dashboard, "tr", attrib=dict(bgcolor=colors[color][r_idx % 2]))

        for c_idx, item in enumerate(row):
            cell = ET.SubElement(
                trow, "td",
                attrib=dict(align="left" if c_idx == 0 else "center"))
            if c_idx == 0:
                # The first column links the test name to its trending plot.
                link = ET.SubElement(
                    cell, "a",
                    attrib=dict(href=_generate_url("../trending/", testbed,
                                                   item)))
                link.text = item
            else:
                cell.text = item

    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
861
862
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For every specified job/build, write the build number, its version and
    one line per failed test ("<nic>-<test name>") into the output file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        # logging.warn is a deprecated alias; use logging.warning as the
        # rest of this module does.
        logging.warning("    No data for the {0} '{1}'.".
                        format(table.get("type", ""), table.get("title", "")))
        return

    tbl_list = list()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get("version", "")
            except KeyError:
                # Missing metadata means the table cannot be completed;
                # abort without writing a partial file.
                logging.error("Data for {job}: {build} is not present.".
                              format(job=job, build=build))
                return
            tbl_list.append(build)
            tbl_list.append(version)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_data["status"] != "FAIL":
                    continue
                # Only tests whose parent suite carries a recognizable NIC
                # identifier are listed.
                groups = re.search(REGEX_NIC, tst_data["parent"])
                if not groups:
                    continue
                nic = groups.group(0)
                tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        for test in tbl_list:
            file_handler.write(test + '\n')
912
913
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Produces a CSV file (and a pretty-printed TXT version) listing test
    cases that failed within the configured time window, ordered primarily
    by number of failures (descending) and secondarily by the time of the
    last failure (newest first).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification

    # "window" is the look-back period in days (default: 7); builds whose
    # metadata timestamp is older than this are ignored.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get("window", 7)))

    # tbl_dict maps test name -> {"name": "<nic>-<test name>",
    #                             "data": OrderedDict(build -> 4-tuple)}.
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Only tests whose parent suite carries a recognizable
                    # NIC identifier are considered.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    # Keep only results generated within the time window.
                    generated = input_data.metadata(job, build).\
                        get("generated", "")
                    if not generated:
                        continue
                    then = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        # (status, timestamp, VPP version, CSIT build id)
                        tbl_dict[tst_name]["data"][build] = (
                            tst_data["status"],
                            generated,
                            input_data.metadata(job, build).get("version", ""),
                            build)
                except (TypeError, KeyError) as err:
                    # Malformed/missing metadata for one build must not
                    # abort the whole table; log and move on.
                    logging.warning("tst_name: {} - err: {}".
                                    format(tst_name, repr(err)))

    # Count failures per test; the last FAIL seen during the iteration
    # provides the "last failure" details (builds are kept in insertion
    # order by the OrderedDict above).
    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        for val in tst_data["data"].values():
            if val[0] == "FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    # Sort by last-failure timestamp (the "%Y%m%d %H:%M" format sorts
    # chronologically as a string), then regroup by failure count so the
    # final order is: most failures first, newest failure first within
    # each count.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1010
1011
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the failed-tests CSV produced earlier, renders it as an HTML
    table (first column linked to the trending plots) and writes it
    wrapped in a reST ``.. raw:: html`` directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # The resulting HTML table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row built from the first CSV line:
    hdr_row = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        cell = ET.SubElement(
            hdr_row, "th",
            attrib=dict(align="left" if idx == 0 else "center"))
        cell.text = item

    # Data rows with alternating background colors:
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        trow = ET.SubElement(
            failed_tests, "tr", attrib=dict(bgcolor=colors[r_idx % 2]))

        for c_idx, item in enumerate(row):
            cell = ET.SubElement(
                trow, "td",
                attrib=dict(align="left" if c_idx == 0 else "center"))
            if c_idx == 0:
                # The first column links the test name to its trending plot.
                link = ET.SubElement(
                    cell, "a",
                    attrib=dict(href=_generate_url("../trending/", testbed,
                                                   item)))
                link.text = item
            else:
                cell.text = item

    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return