CSIT-1197: Add Comparison Across Testbeds to the Report
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20
21 from string import replace
22 from collections import OrderedDict
23 from numpy import nan, isnan
24 from xml.etree import ElementTree as ET
25
26 from errors import PresentationError
27 from utils import mean, stdev, relative_change, classify_anomalies, \
28     convert_csv_to_pretty_txt
29
30
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table_spec in spec.tables:
        alg_name = table_spec["algorithm"]
        try:
            # The algorithm name from the specification selects one of the
            # table_* generator functions defined in this module.
            # NOTE(review): eval() on a spec-provided string - the
            # specification file must be trusted input.
            table_generator = eval(alg_name)
            table_generator(table_spec, data)
        except NameError as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=alg_name, err=repr(err)))
    logging.info("Done.")
48
49
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables; double each double quote so the
    # titles survive CSV quoting.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification. Only the first job / its first build is used.
    # Note: list(...) instead of .keys()[0] so this works on both
    # Python 2 and Python 3.
    job = list(table["data"].keys())[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.items():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # The column spec is "<command> <key>"; the second
                        # word is the key into the test data.
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # Drop the first " |br| " separator and wrap the
                            # rest in a pre-formatted block.
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file; one file per suite that produced rows.
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
116
117
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data: merge test data from all jobs/builds into one
    # flat mapping keyed by test name.
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; double each double quote so the
    # titles survive CSV quoting.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.items():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # The column spec is "<command> <key>"; the second
                        # word is the key into the test data.
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # Drop the first " |br| " separator and wrap the
                            # rest in a pre-formatted block.
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file; one file per suite that produced rows.
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
181
182
def table_performance_improvements(table, input_data):
    """Generate the table(s) with algorithm: table_performance_improvements
    specified in the specification file.

    # FIXME: Not used now.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    def _write_line_to_file(file_handler, data):
        """Write a line to the .csv file.

        :param file_handler: File handler for the csv file. It must be open for
         writing text.
        :param data: Item to be written to the file.
        :type file_handler: BinaryIO
        :type data: list
        """

        line_lst = list()
        for item in data:
            if isinstance(item["data"], str):
                # Remove -?drdisc from the end
                # NOTE(review): drops the last 8 characters, which matches
                # "-ndrdisc" / "-pdrdisc"; any other "...drdisc" suffix would
                # lose extra characters - confirm only those two occur here.
                if item["data"].endswith("drdisc"):
                    item["data"] = item["data"][:-8]
                line_lst.append(item["data"])
            elif isinstance(item["data"], float):
                # Numeric cells are written with one decimal place.
                line_lst.append("{:.1f}".format(item["data"]))
            elif item["data"] is None:
                line_lst.append("")
        file_handler.write(",".join(line_lst) + "\n")

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the template
    file_name = table.get("template", None)
    if file_name:
        try:
            tmpl = _read_csv_template(file_name)
        except PresentationError:
            logging.error("  The template '{0}' does not exist. Skipping the "
                          "table.".format(file_name))
            return None
    else:
        logging.error("The template is not defined. Skipping the table.")
        return None

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append(column["title"])

    # Generate the data for the table according to the model in the table
    # specification. Each column spec is "<command> <args...>".
    tbl_lst = list()
    for tmpl_item in tmpl:
        tbl_item = list()
        for column in table["columns"]:
            cmd = column["data"].split(" ")[0]
            args = column["data"].split(" ")[1:]
            if cmd == "template":
                # "template <index>": copy a cell from the template line,
                # converted to float when possible.
                try:
                    val = float(tmpl_item[int(args[0])])
                except ValueError:
                    val = tmpl_item[int(args[0])]
                tbl_item.append({"data": val})
            elif cmd == "data":
                # "data <job>... <operation>": collect throughput values of
                # this test from the listed jobs and reduce them with the
                # named operation (scaled to Mpps).
                jobs = args[0:-1]
                operation = args[-1]
                data_lst = list()
                for job in jobs:
                    for build in data[job]:
                        try:
                            data_lst.append(float(build[tmpl_item[0]]
                                                  ["throughput"]["value"]))
                        except (KeyError, TypeError):
                            # No data, ignore
                            continue
                if data_lst:
                    # NOTE(review): "operation" comes straight from the
                    # specification file and is resolved via eval() - the
                    # spec must be trusted input.
                    tbl_item.append({"data": (eval(operation)(data_lst)) /
                                             1000000})
                else:
                    tbl_item.append({"data": None})
            elif cmd == "operation":
                # "operation <op> <idx1> <idx2>": combine two previously
                # computed cells of this row.
                operation = args[0]
                try:
                    nr1 = float(tbl_item[int(args[1])]["data"])
                    nr2 = float(tbl_item[int(args[2])]["data"])
                    if nr1 and nr2:
                        tbl_item.append({"data": eval(operation)(nr1, nr2)})
                    else:
                        tbl_item.append({"data": None})
                except (IndexError, ValueError, TypeError):
                    logging.error("No data for {0}".format(tbl_item[0]["data"]))
                    tbl_item.append({"data": None})
                    continue
            else:
                logging.error("Not supported command {0}. Skipping the table.".
                              format(cmd))
                return None
        tbl_lst.append(tbl_item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)

    # Create the tables and write them to the files
    file_names = [
        "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])
    ]

    for file_name in file_names:
        logging.info("    Writing the file '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in tbl_lst:
                if isinstance(item[-1]["data"], float):
                    rel_change = round(item[-1]["data"], 1)
                else:
                    rel_change = item[-1]["data"]
                # Split the rows into the four files: ndr/pdr tests with a
                # relative change of at least 10 % go to the *_top files, the
                # rest to the *_low files.
                if "ndr_top" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_top" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "ndr_low" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_low" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)

    logging.info("  Done.")
332
333
334 def _read_csv_template(file_name):
335     """Read the template from a .csv file.
336
337     # FIXME: Not used now.
338
339     :param file_name: Name / full path / relative path of the file to read.
340     :type file_name: str
341     :returns: Data from the template as list (lines) of lists (items on line).
342     :rtype: list
343     :raises: PresentationError if it is not possible to read the file.
344     """
345
346     try:
347         with open(file_name, 'r') as csv_file:
348             tmpl_data = list()
349             for line in csv_file:
350                 tmpl_data.append(line[:-1].split(","))
351         return tmpl_data
352     except IOError as err:
353         raise PresentationError(str(err), level="ERROR")
354
355
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # Sentinel meaning "this test does not provide a rate for the configured
    # view"; distinct from None so a None value in the data is still appended
    # exactly as the original inline code did.
    _no_rate = object()

    def _strip_rate_suffix(tst_name):
        """Normalize a test name by removing the rate-type suffix so the same
        test coming from reference, compare and history data maps onto a
        single table row.

        :param tst_name: Original test name.
        :type tst_name: str
        :returns: Test name without the rate-type part.
        :rtype: str
        """
        # The order matters: longer suffixes must be removed first.
        return tst_name.replace("-ndrpdrdisc", ""). \
            replace("-ndrpdr", "").replace("-pdrdisc", ""). \
            replace("-ndrdisc", "").replace("-pdr", ""). \
            replace("-ndr", "")

    def _get_rate(tst_data):
        """Pick the measured rate of one test run according to the
        "include-tests" parameter of the table specification.

        # TODO: Re-work when NDRPDRDISC tests are not used

        :param tst_data: Data of one test run.
        :type tst_data: dict
        :returns: The rate, or the _no_rate sentinel when the test type does
            not provide one.
        :raises KeyError, TypeError: Propagated when data is missing so each
            caller keeps its original handling.
        """
        if table["include-tests"] == "MRR":
            return tst_data["result"]["receive-rate"].avg
        if table["include-tests"] == "PDR":
            if tst_data["type"] == "PDR":
                return tst_data["throughput"]["value"]
            if tst_data["type"] == "NDRPDR":
                return tst_data["throughput"]["PDR"]["LOWER"]
            return _no_rate
        if table["include-tests"] == "NDR":
            if tst_data["type"] == "NDR":
                return tst_data["throughput"]["value"]
            if tst_data["type"] == "NDRPDR":
                return tst_data["throughput"]["NDR"]["LOWER"]
            return _no_rate
        return _no_rate

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table; one entry per normalized test name.
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _strip_rate_suffix(tst_name)
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Row name: first dash-separated part of the parent suite
                    # name plus the test name without its last token.
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[:-1]))
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    rate = _get_rate(tst_data)
                    if rate is not _no_rate:
                        tbl_dict[tst_name_mod]["ref-data"].append(rate)
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _strip_rate_suffix(tst_name)
                try:
                    rate = _get_rate(tst_data)
                    if rate is not _no_rate:
                        tbl_dict[tst_name_mod]["cmp-data"].append(rate)
                except KeyError:
                    # Test present only in the compare data - no row to
                    # attach it to.
                    pass
                except TypeError:
                    # Unusable compare data invalidates the whole row.
                    tbl_dict.pop(tst_name_mod, None)

    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].items():
                        tst_name_mod = _strip_rate_suffix(tst_name)
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(
                                item["title"], None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            rate = _get_rate(tst_data)
                            if rate is not _no_rate:
                                tbl_dict[tst_name_mod]["history"][
                                    item["title"]].append(rate)
                        except (TypeError, KeyError):
                            pass

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # item[-4] is the reference mean, item[-2] the compare mean.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Rows without a delta would be shorter than the header; drop them.
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
560
561
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table; per test name keep the displayed name and an
    # ordered mapping build -> measured receive rate.
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            tst_data["name"])
                    tbl_dict[tst_name] = {"name": name,
                                          "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        # At least two samples are needed to talk about a trend.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # The long-term slice was empty or all NaN - no long-term
            # reference available.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last trend value vs the value one window back.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        # Long-term change: last trend value vs the long-term maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 '-' if isnan(last_avg) else
                 round(last_avg / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Stable re-sort: most regressions first, then most progressions, then by
    # short-term change; ties keep the alphabetical order from above.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
674
675
676 def _generate_url(base, test_name):
677     """Generate URL to a trending plot from the name of the test case.
678
679     :param base: The base part of URL common to all test cases.
680     :param test_name: The name of the test case.
681     :type base: str
682     :type test_name: str
683     :returns: The URL to the plot with the trending data for the given test
684         case.
685     :rtype str
686     """
687
688     url = base
689     file_name = ""
690     anchor = "#"
691     feature = ""
692
693     if "lbdpdk" in test_name or "lbvpp" in test_name:
694         file_name = "link_bonding.html"
695
696     elif "testpmd" in test_name or "l3fwd" in test_name:
697         file_name = "dpdk.html"
698
699     elif "memif" in test_name:
700         file_name = "container_memif.html"
701
702     elif "srv6" in test_name:
703         file_name = "srv6.html"
704
705     elif "vhost" in test_name:
706         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
707             file_name = "vm_vhost_l2.html"
708         elif "ip4base" in test_name:
709             file_name = "vm_vhost_ip4.html"
710
711     elif "ipsec" in test_name:
712         file_name = "ipsec.html"
713
714     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
715         file_name = "ip4_tunnels.html"
716
717     elif "ip4base" in test_name or "ip4scale" in test_name:
718         file_name = "ip4.html"
719         if "iacl" in test_name or "snat" in test_name or "cop" in test_name:
720             feature = "-features"
721
722     elif "ip6base" in test_name or "ip6scale" in test_name:
723         file_name = "ip6.html"
724
725     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
726             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
727             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
728         file_name = "l2.html"
729         if "iacl" in test_name:
730             feature = "-features"
731
732     if "x520" in test_name:
733         anchor += "x520-"
734     elif "x710" in test_name:
735         anchor += "x710-"
736     elif "xl710" in test_name:
737         anchor += "xl710-"
738
739     if "64b" in test_name:
740         anchor += "64b-"
741     elif "78b" in test_name:
742         anchor += "78b-"
743     elif "imix" in test_name:
744         anchor += "imix-"
745     elif "9000b" in test_name:
746         anchor += "9000b-"
747     elif "1518" in test_name:
748         anchor += "1518b-"
749
750     if "1t1c" in test_name:
751         anchor += "1t1c"
752     elif "2t2c" in test_name:
753         anchor += "2t2c"
754     elif "4t4c" in test_name:
755         anchor += "4t4c"
756
757     return url + file_name + anchor + feature
758
759
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the CSV dashboard produced earlier, renders it as an HTML table
    (with links to trending plots in the first column) and writes it wrapped
    in a ``.. raw:: html`` rST directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # An empty input file would crash on csv_lst[0] below; bail out early.
    if not csv_lst:
        logging.warning("The input file '{0}' is empty.".
                        format(table["input-file"]))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header; the first column (test name) is left-aligned, the rest
    # are centered:
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows; each classification has a pair of background colors which
    # alternate between even and odd rows:
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 carry the number of regressions / progressions.
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        else:
            color = "normal"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name column: link the test name to its trending plot.
            if c_idx == 0:
                url = _generate_url("../trending/", item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
830
831
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    For every test case the number of failures within the configured window
    of builds is counted, together with the time, VPP build and CSIT job
    build of the most recent failure. The result is written as CSV and as a
    pretty txt table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Collect per-test status over the specified builds, keyed by test name;
    # per-build entries are kept in insertion (build) order.
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tst_name not in tbl_dict:
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(
                            tst_data["parent"].split("-")[0],
                            tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][build] = (
                        tst_data["status"],
                        input_data.metadata(job, build).get("generated", ""),
                        input_data.metadata(job, build).get("version", ""),
                        build)
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    # Count the failures in the last <window> builds and remember the most
    # recent failing entry.
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        win_size = min(len(tst_data["data"]), table["window"])
        fails_nr = 0
        last_fail = None
        for val in tst_data["data"].values()[-win_size:]:
            if val[0] == "FAIL":
                fails_nr += 1
                last_fail = val
        if fails_nr:
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            last_fail[1],
                            last_fail[2],
                            "mrr-daily-build-{0}".format(last_fail[3])])

    # Newest failures first, then bucketed by the failure count descending.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(table["window"], -1, -1):
        tbl_sorted.extend([item for item in tbl_lst if item[1] == nrf])

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
913
914
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the failed-tests CSV produced by table_failed_tests, renders it as
    an HTML table (with links to trending plots in the first column) and
    writes it wrapped in a ``.. raw:: html`` rST directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # An empty input file would crash on csv_lst[0] below; bail out early.
    if not csv_lst:
        logging.warning("The input file '{0}' is empty.".
                        format(table["input-file"]))
        return

    # Table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header; the first column (test name) is left-aligned, the rest
    # are centered:
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows; background colors alternate between even and odd rows:
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name column: link the test name to its trending plot.
            if c_idx == 0:
                url = _generate_url("../trending/", item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return