Trending: Alerts
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
28
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30     convert_csv_to_pretty_txt
31
32
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
34
35
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        # Look the generator up by name in the module namespace instead of
        # eval()-ing a string taken from the specification file.
        generator = globals().get(table["algorithm"], None)
        if generator is None:
            logging.error("Probably algorithm '{alg}' is not defined.".
                          format(alg=table["algorithm"]))
            continue
        try:
            generator(table, data)
        except NameError as err:
            # Preserve the original behavior: a NameError raised inside the
            # generator is logged, not propagated.
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
    logging.info("Done.")
53
54
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification. Only the first job and its first build are used.
    # next(iter(...)) works on both Python 2 and 3 (dict.keys()[0] does not).
    job = next(iter(table["data"]))
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.items():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("conf-history",
                                                            "show-run"):
                            # Strip the first " |br| " marker and wrap the
                            # rest in |prein|/|preout| markers. str.replace
                            # with a count replaces the Python-2-only
                            # string.replace() function.
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
121
122
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.items():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("conf-history",
                                                            "show-run"):
                            # Strip the first " |br| " marker and wrap the
                            # rest in |prein|/|preout| markers. str.replace
                            # with a count replaces the Python-2-only
                            # string.replace() function.
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
186
187
def _tpc_modify_test_name(test_name):
    """Normalize a test name for the comparison tables: drop the test-type
    suffix (-ndrpdrdisc, -ndrpdr, -pdrdisc, -ndrdisc, -pdr, -ndr) and reduce
    the thread/core combination (e.g. 2t1c) to the core count only (1c).

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    return test_name.replace("-ndrpdrdisc", ""). \
        replace("-ndrpdr", "").replace("-pdrdisc", ""). \
        replace("-ndrdisc", "").replace("-pdr", ""). \
        replace("-ndr", ""). \
        replace("1t1c", "1c").replace("2t1c", "1c"). \
        replace("2t2c", "2c").replace("4t2c", "2c"). \
        replace("4t4c", "4c").replace("8t4c", "4c")


def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["ref-data"].append(
                            tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["cmp-data"].append(
                            tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except KeyError:
                    pass
                except TypeError:
                    # Invalid data on the compare side: drop the whole row.
                    tbl_dict.pop(tst_name_mod, None)
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].items():
                        tst_name_mod = _tpc_modify_test_name(tst_name)
                        if "across topologies" in table["title"].lower():
                            tst_name_mod = tst_name_mod.replace("2n1l-", "")
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(
                                item["title"], None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        hist_lst = tbl_dict[tst_name_mod]["history"][
                            item["title"]]
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                hist_lst.append(
                                    tst_data["result"]["receive-rate"].avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    hist_lst.append(
                                        tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    hist_lst.append(
                                        tst_data["throughput"]["PDR"]["LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    hist_lst.append(
                                        tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    hist_lst.append(
                                        tst_data["throughput"]["NDR"]["LOWER"])
                            else:
                                continue
                        except (TypeError, KeyError):
                            pass

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Rows with missing columns (no delta or incomplete history) are
        # dropped because their length does not match the header.
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
414
415
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                # Drop the test-type suffix, reduce thread/core combination
                # to the core count, and strip the NIC name so results of
                # the same test on different NICs fall into the same key.
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = "-".join(tst_data["name"].split("-")[:-1])
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    if table["include-tests"] == "MRR":
                        result = tst_data["result"]["receive-rate"].avg
                    elif table["include-tests"] == "PDR":
                        result = tst_data["throughput"]["PDR"]["LOWER"]
                    elif table["include-tests"] == "NDR":
                        result = tst_data["throughput"]["NDR"]["LOWER"]
                    else:
                        result = None

                    # The NIC tag decides which side of the comparison the
                    # result belongs to.
                    if result:
                        if table["reference"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["ref-data"].append(result)
                        elif table["compare"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug("No data for {0}".format(tst_name))
                    logging.debug(repr(err))
                    # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name]["name"], ]
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Rows without a computed delta are dropped because their length
        # does not match the header.
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
524
525
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        # Tests whose parent carries no NIC name are skipped.
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name]["data"]
        if len(data_t) < 2:
            # At least two samples are needed to classify a trend.
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # The long-term window contains no valid samples.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip rows with any NaN column; this single check subsumes the
            # former separate test for both relative changes being NaN.
            if (isnan(last_avg) or
                    isnan(rel_change_last) or
                    isnan(rel_change_long)):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order rows by the number of regressions, then progressions (both
    # descending), then by short-term change within each bucket.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
644
645
646 def _generate_url(base, testbed, test_name):
647     """Generate URL to a trending plot from the name of the test case.
648
649     :param base: The base part of URL common to all test cases.
650     :param testbed: The testbed used for testing.
651     :param test_name: The name of the test case.
652     :type base: str
653     :type testbed: str
654     :type test_name: str
655     :returns: The URL to the plot with the trending data for the given test
656         case.
657     :rtype str
658     """
659
660     url = base
661     file_name = ""
662     anchor = ".html#"
663     feature = ""
664
665     if "lbdpdk" in test_name or "lbvpp" in test_name:
666         file_name = "link_bonding"
667
668     elif "114b" in test_name and "vhost" in test_name:
669         file_name = "vts"
670
671     elif "testpmd" in test_name or "l3fwd" in test_name:
672         file_name = "dpdk"
673
674     elif "memif" in test_name:
675         file_name = "container_memif"
676         feature = "-base"
677
678     elif "srv6" in test_name:
679         file_name = "srv6"
680
681     elif "vhost" in test_name:
682         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
683             file_name = "vm_vhost_l2"
684             if "114b" in test_name:
685                 feature = ""
686             elif "l2xcbase" in test_name and "x520" in test_name:
687                 feature = "-base-l2xc"
688             elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
689                 feature = "-base-l2bd"
690             else:
691                 feature = "-base"
692         elif "ip4base" in test_name:
693             file_name = "vm_vhost_ip4"
694             feature = "-base"
695
696     elif "ipsec" in test_name:
697         file_name = "ipsec"
698         feature = "-base-scale"
699
700     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
701         file_name = "ip4_tunnels"
702         feature = "-base"
703
704     elif "ip4base" in test_name or "ip4scale" in test_name:
705         file_name = "ip4"
706         if "xl710" in test_name:
707             feature = "-base-scale-features"
708         elif "iacl" in test_name:
709             feature = "-features-iacl"
710         elif "oacl" in test_name:
711             feature = "-features-oacl"
712         elif "snat" in test_name or "cop" in test_name:
713             feature = "-features"
714         else:
715             feature = "-base-scale"
716
717     elif "ip6base" in test_name or "ip6scale" in test_name:
718         file_name = "ip6"
719         feature = "-base-scale"
720
721     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
722             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
723             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
724         file_name = "l2"
725         if "macip" in test_name:
726             feature = "-features-macip"
727         elif "iacl" in test_name:
728             feature = "-features-iacl"
729         elif "oacl" in test_name:
730             feature = "-features-oacl"
731         else:
732             feature = "-base-scale"
733
734     if "x520" in test_name:
735         nic = "x520-"
736     elif "x710" in test_name:
737         nic = "x710-"
738     elif "xl710" in test_name:
739         nic = "xl710-"
740     elif "xxv710" in test_name:
741         nic = "xxv710-"
742     elif "vic1227" in test_name:
743         nic = "vic1227-"
744     elif "vic1385" in test_name:
745         nic = "vic1385-"
746     else:
747         nic = ""
748     anchor += nic
749
750     if "64b" in test_name:
751         framesize = "64b"
752     elif "78b" in test_name:
753         framesize = "78b"
754     elif "imix" in test_name:
755         framesize = "imix"
756     elif "9000b" in test_name:
757         framesize = "9000b"
758     elif "1518b" in test_name:
759         framesize = "1518b"
760     elif "114b" in test_name:
761         framesize = "114b"
762     else:
763         framesize = ""
764     anchor += framesize + '-'
765
766     if "1t1c" in test_name:
767         anchor += "1t1c"
768     elif "2t2c" in test_name:
769         anchor += "2t2c"
770     elif "4t4c" in test_name:
771         anchor += "4t4c"
772     elif "2t1c" in test_name:
773         anchor += "2t1c"
774     elif "4t2c" in test_name:
775         anchor += "4t2c"
776     elif "8t4c" in test_name:
777         anchor += "8t4c"
778
779     return url + file_name + '-' + testbed + '-' + nic + framesize + feature + \
780            anchor + feature
781
782
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the CSV dashboard produced earlier, renders it as an HTML table
    with row colors chosen by the regression/progression flags (columns 4
    and 5) and writes it out wrapped in a reST ".. raw:: html" directive.
    The first column of each row is turned into a link to the trending plot.

    :param table: Table to generate.
    :param input_data: Data to process (unused here; kept for the common
        table-algorithm interface).
    :type table: dict
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Root element of the generated HTML table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row (first CSV line):
    header_row = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, title in enumerate(rows[0]):
        cell = ET.SubElement(
            header_row, "th",
            attrib=dict(align="left" if idx == 0 else "center"))
        cell.text = title

    # Two alternating shades per classification, picked by row parity:
    palette = {"regression": ("#ffcccc", "#ff9999"),
               "progression": ("#c6ecc6", "#9fdf9f"),
               "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(rows[1:]):
        if int(row[4]):
            shades = palette["regression"]
        elif int(row[5]):
            shades = palette["progression"]
        else:
            shades = palette["normal"]
        row_elem = ET.SubElement(
            dashboard, "tr", attrib=dict(bgcolor=shades[r_idx % 2]))

        for c_idx, value in enumerate(row):
            cell = ET.SubElement(
                row_elem, "td",
                attrib=dict(align="left" if c_idx == 0 else "center"))
            if c_idx == 0:
                # First column: test name linked to its trending plot.
                link = ET.SubElement(cell, "a", attrib=dict(
                    href=_generate_url("../trending/", testbed, value)))
                link.text = value
            else:
                cell.text = value
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
859
860
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each job/build listed in the table specification, write the build
    number, the tested version and the names of all failed tests (each
    prefixed with the NIC extracted from the suite name) to a plain text
    file, one item per line.

    :param table: Table specification read from the specification file.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        # Fix: logging.warn is a deprecated alias; use logging.warning as
        # the rest of this module does.
        logging.warning("    No data for the {0} '{1}'.".
                        format(table.get("type", ""), table.get("title", "")))
        return

    tbl_list = list()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            # Build header: build number followed by the tested version.
            tbl_list.append(build)
            tbl_list.append(input_data.metadata(job, build).get("version", ""))
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_data["status"] != "FAIL":
                    continue
                # Tests whose parent suite does not encode a NIC are skipped.
                groups = re.search(REGEX_NIC, tst_data["parent"])
                if not groups:
                    continue
                nic = groups.group(0)
                tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        for test in tbl_list:
            file_handler.write(test + '\n')
904
905
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Counts, per test case, the number of failures within a sliding time
    window (``table["window"]`` days, default 7) and writes a CSV plus a
    pretty-printed text table sorted by failure count (descending) and,
    within the same count, by the date of the last failure (descending).

    :param table: Table specification read from the specification file.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within this many days before "now" are counted.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get("window", 7)))

    # tbl_dict: tst_name -> {"name": "<nic>-<test name>",
    #                        "data": OrderedDict(build -> (status, generated,
    #                                                      version, build))}
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Tests whose parent suite does not encode a NIC are
                    # skipped entirely.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    generated = input_data.metadata(job, build).\
                        get("generated", "")
                    if not generated:
                        continue
                    then = dt.strptime(generated, "%Y%m%d %H:%M")
                    # Keep only builds that fall inside the time window.
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name]["data"][build] = (
                            tst_data["status"],
                            generated,
                            input_data.metadata(job, build).get("version", ""),
                            build)
                except (TypeError, KeyError) as err:
                    logging.warning("tst_name: {} - err: {}".
                                    format(tst_name, repr(err)))

    # Count failures per test; the "last failure" fields end up holding the
    # values of the last FAIL seen in insertion order of the OrderedDict
    # (i.e. the order builds were processed above).
    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        for val in tst_data["data"].values():
            if val[0] == "FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        # Tests with no failures in the window are not reported.
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    # Sort by last-failure date (newest first), then stably regroup by
    # failure count (highest first); the stable regrouping preserves the
    # date order within each failure-count bucket.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    # Also emit a human-readable rendering of the same CSV.
    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1002
1003
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV produced by table_failed_tests, renders it as an HTML
    table with alternating row shades and writes it out wrapped in a reST
    ".. raw:: html" directive. The first column of each row is turned into
    a link to the trending plot of that test.

    :param table: Table to generate.
    :param input_data: Data to process (unused here; kept for the common
        table-algorithm interface).
    :type table: dict
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Root element of the generated HTML table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row (first CSV line):
    header_row = ET.SubElement(
        failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, title in enumerate(rows[0]):
        cell = ET.SubElement(
            header_row, "th",
            attrib=dict(align="left" if idx == 0 else "center"))
        cell.text = title

    # Data rows with alternating background shades:
    shades = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(rows[1:]):
        row_elem = ET.SubElement(
            failed_tests, "tr", attrib=dict(bgcolor=shades[r_idx % 2]))

        for c_idx, value in enumerate(row):
            cell = ET.SubElement(
                row_elem, "td",
                attrib=dict(align="left" if c_idx == 0 else "center"))
            if c_idx == 0:
                # First column: test name linked to its trending plot.
                link = ET.SubElement(cell, "a", attrib=dict(
                    href=_generate_url("../trending/", testbed, value)))
                link.text = value
            else:
                cell.text = value
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return