Trending: ignore list for the dashboard
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2017 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import prettytable
21 import pandas as pd
22
23 from string import replace
24 from math import isnan
25 from collections import OrderedDict
26 from numpy import nan
27 from xml.etree import ElementTree as ET
28
29 from errors import PresentationError
30 from utils import mean, stdev, relative_change, remove_outliers, split_outliers
31
32
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table entry in the specification names its generating algorithm,
    which must be a function defined in this module (e.g. "table_details").

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        try:
            # Resolve the algorithm by name in this module's namespace
            # instead of eval(): a NameError raised *inside* an algorithm
            # is then no longer misreported as a missing algorithm, and
            # eval() of specification-supplied text is avoided.
            generate = globals()[table["algorithm"]]
        except KeyError:
            logging.error("The algorithm '{0}' is not defined.".
                          format(table["algorithm"]))
            continue
        generate(table, data)
    logging.info("Done.")
50
51
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    For every suite of the first build of the first job in the table data,
    write one .csv file with a row per test and a column per spec column.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table)

    # Header: every column title, csv-quoted (embedded '"' doubled).
    header = ['"{0}"'.format(str(col["title"]).replace('"', '""'))
              for col in table["columns"]]

    # Only the first build of the first job listed in the table data is used.
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for _, suite in suites.iteritems():
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            # Keep only tests whose parent belongs to this suite.
            if data[job][build][test]["parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table["columns"]:
                data_key = column["data"].split(" ")[1]
                try:
                    cell = str(data[job][build][test][data_key]).\
                        replace('"', '""')
                    if data_key in ("vat-history", "show-run"):
                        # Drop the first " |br| " and the trailing 5 chars,
                        # then wrap in |prein| / |preout| markers.
                        cell = cell.replace(" |br| ", "", 1)
                        cell = " |prein| {0} |preout| ".\
                            format(cell[:-5])
                    row_lst.append('"{0}"'.format(cell))
                except KeyError:
                    row_lst.append("No data")
            table_lst.append(row_lst)

        # Write the suite's table only when it has at least one row.
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for row in table_lst:
                    file_handler.write(",".join(row) + "\n")

    logging.info("  Done.")
116
117
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Same as table_details, but the data of all jobs/builds is merged
    first, so tests are looked up directly (not per job/build).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data: merge all builds into one data set.
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Header: every column title, csv-quoted (embedded '"' doubled).
    header = ['"{0}"'.format(str(col["title"]).replace('"', '""'))
              for col in table["columns"]]

    for _, suite in suites.iteritems():
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            # Keep only tests whose parent belongs to this suite.
            if data[test]["parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table["columns"]:
                data_key = column["data"].split(" ")[1]
                try:
                    cell = str(data[test][data_key]).\
                        replace('"', '""')
                    if data_key in ("vat-history", "show-run"):
                        # Drop the first " |br| " and the trailing 5 chars,
                        # then wrap in |prein| / |preout| markers.
                        cell = cell.replace(" |br| ", "", 1)
                        cell = " |prein| {0} |preout| ".\
                            format(cell[:-5])
                    row_lst.append('"{0}"'.format(cell))
                except KeyError:
                    row_lst.append("No data")
            table_lst.append(row_lst)

        # Write the suite's table only when it has at least one row.
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for row in table_lst:
                    file_handler.write(",".join(row) + "\n")

    logging.info("  Done.")
177
178
def table_performance_improvements(table, input_data):
    """Generate the table(s) with algorithm: table_performance_improvements
    specified in the specification file.

    Each column of the table is driven by a spec command string:
      - "template <idx>": copy item <idx> from the current template row;
      - "data <job> ... <operation>": collect throughput values from the
        listed jobs and apply <operation> (resolved by eval, e.g. "mean");
      - "operation <op> <idx1> <idx2>": apply <op> (resolved by eval) to
        the two already-computed columns.
    Four .csv files are written, splitting NDR/PDR rows at a relative
    change of 10.0.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    def _write_line_to_file(file_handler, data):
        """Write a line to the .csv file.

        :param file_handler: File handler for the csv file. It must be open for
         writing text.
        :param data: Item to be written to the file.
        :type file_handler: BinaryIO
        :type data: list
        """

        line_lst = list()
        for item in data:
            if isinstance(item["data"], str):
                # Remove -?drdisc from the end
                # (8 characters, e.g. "-ndrdisc" / "-pdrdisc"; note this
                # mutates the shared row in place, so the suffix is gone
                # for any later file the row is written to).
                if item["data"].endswith("drdisc"):
                    item["data"] = item["data"][:-8]
                line_lst.append(item["data"])
            elif isinstance(item["data"], float):
                line_lst.append("{:.1f}".format(item["data"]))
            elif item["data"] is None:
                line_lst.append("")
        file_handler.write(",".join(line_lst) + "\n")

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the template; without a template the table cannot be built.
    file_name = table.get("template", None)
    if file_name:
        try:
            tmpl = _read_csv_template(file_name)
        except PresentationError:
            logging.error("  The template '{0}' does not exist. Skipping the "
                          "table.".format(file_name))
            return None
    else:
        logging.error("The template is not defined. Skipping the table.")
        return None

    # Transform the data
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append(column["title"])

    # Generate the data for the table according to the model in the table
    # specification. One row per template row; each cell is a
    # {"data": value} dict so the writer can format by type.
    tbl_lst = list()
    for tmpl_item in tmpl:
        tbl_item = list()
        for column in table["columns"]:
            cmd = column["data"].split(" ")[0]
            args = column["data"].split(" ")[1:]
            if cmd == "template":
                # Copy a template cell; numeric cells become floats.
                try:
                    val = float(tmpl_item[int(args[0])])
                except ValueError:
                    val = tmpl_item[int(args[0])]
                tbl_item.append({"data": val})
            elif cmd == "data":
                # Gather throughput values keyed by the template's first
                # column (the test name) across all builds of the jobs.
                jobs = args[0:-1]
                operation = args[-1]
                data_lst = list()
                for job in jobs:
                    for build in data[job]:
                        try:
                            data_lst.append(float(build[tmpl_item[0]]
                                                  ["throughput"]["value"]))
                        except (KeyError, TypeError):
                            # No data, ignore
                            continue
                if data_lst:
                    # eval resolves the spec-controlled operation name
                    # (e.g. "mean"); / 1000000 scales to millions.
                    tbl_item.append({"data": (eval(operation)(data_lst)) /
                                             1000000})
                else:
                    tbl_item.append({"data": None})
            elif cmd == "operation":
                # Combine two already computed cells (e.g. relative_change).
                operation = args[0]
                try:
                    nr1 = float(tbl_item[int(args[1])]["data"])
                    nr2 = float(tbl_item[int(args[2])]["data"])
                    if nr1 and nr2:
                        tbl_item.append({"data": eval(operation)(nr1, nr2)})
                    else:
                        tbl_item.append({"data": None})
                except (IndexError, ValueError, TypeError):
                    logging.error("No data for {0}".format(tbl_item[0]["data"]))
                    tbl_item.append({"data": None})
                    continue
            else:
                logging.error("Not supported command {0}. Skipping the table.".
                              format(cmd))
                return None
        tbl_lst.append(tbl_item)

    # Sort the table according to the relative change (last column),
    # descending. NOTE(py2): rows whose last cell is None compare below
    # any number, so they sink to the end with reverse=True.
    tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)

    # Create the tables and write them to the files
    file_names = [
        "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])
    ]

    for file_name in file_names:
        logging.info("    Writing the file '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in tbl_lst:
                if isinstance(item[-1]["data"], float):
                    rel_change = round(item[-1]["data"], 1)
                else:
                    rel_change = item[-1]["data"]
                # Split rows: "top" files get change >= 10.0, "low" files
                # the rest; rows are further filtered by "ndr"/"pdr" in
                # the test name. NOTE(py2): a None rel_change compares
                # < 10.0 and therefore lands in the "low" files.
                if "ndr_top" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_top" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "ndr_low" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_low" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)

    logging.info("  Done.")
324
325
326 def _read_csv_template(file_name):
327     """Read the template from a .csv file.
328
329     :param file_name: Name / full path / relative path of the file to read.
330     :type file_name: str
331     :returns: Data from the template as list (lines) of lists (items on line).
332     :rtype: list
333     :raises: PresentationError if it is not possible to read the file.
334     """
335
336     try:
337         with open(file_name, 'r') as csv_file:
338             tmpl_data = list()
339             for line in csv_file:
340                 tmpl_data.append(line[:-1].split(","))
341         return tmpl_data
342     except IOError as err:
343         raise PresentationError(str(err), level="ERROR")
344
345
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Builds one row per test with (optional history columns +) reference
    mean/stdev, compare mean/stdev and the relative change [%], then writes
    full per-core .csv and .txt tables plus top/bottom selections for the
    1t1c NDR and PDR cases.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables. "history" is optional in the spec;
    # when present, two extra columns (mean/stdev) are added per history item.
    try:
        header = ["Test case", ]

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} Throughput [Mpps]".format(item["title"]),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} Throughput [Mpps]".format(table["compare"]["title"]),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Change [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    # tbl_dict maps test name -> {"name", "ref-data", "cmp-data",
    # optional "history" (OrderedDict keyed by history title)}.
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    # Display name: first segment of the parent suite +
                    # the test name without its own first segment.
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name,
                                          "ref-data": list(),
                                          "cmp-data": list()}
                try:
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["throughput"]["value"])
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                try:
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["throughput"]["value"])
                except KeyError:
                    # Test present only in the compare build; skip it.
                    pass
                except TypeError:
                    # No usable compare data; drop the test entirely.
                    tbl_dict.pop(tst_name, None)
    if history:
        # Collect history throughput values only for tests already seen
        # in the reference data.
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        if tbl_dict.get(tst_name, None) is None:
                            continue
                        if tbl_dict[tst_name].get("history", None) is None:
                            tbl_dict[tst_name]["history"] = OrderedDict()
                        if tbl_dict[tst_name]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name]["history"][item["title"]] = \
                                list()
                        try:
                            tbl_dict[tst_name]["history"][item["title"]].\
                                append(tst_data["throughput"]["value"])
                        except (TypeError, KeyError):
                            pass

    # Build the rows: name, [history mean/stdev ...,] ref mean/stdev,
    # cmp mean/stdev, change [%]. Values are scaled to Mpps (/ 1000000).
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        data_t = remove_outliers(
                            hist_data, outlier_const=table["outlier-const"])
                        if data_t:
                            item.append(round(mean(data_t) / 1000000, 2))
                            item.append(round(stdev(data_t) / 1000000, 2))
                        else:
                            item.extend([None, None])
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        if tbl_dict[tst_name]["ref-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        if tbl_dict[tst_name]["cmp-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        # item[-4] is the reference mean, item[-2] the compare mean.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only rows with every column filled (incl. the change).
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change, descending.
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate tables:
    # All tests in csv:
    tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
                 ]
    for file_name in tbl_names:
        logging.info("      Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            for test in tbl_lst:
                if (file_name.split("-")[-3] in test[0] and    # NDR vs PDR
                        file_name.split("-")[-2] in test[0]):  # cores
                    # NOTE: this strips the last name segment IN PLACE, so
                    # the row is changed for all later files in this loop —
                    # the iteration order of tbl_names matters here.
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item) for item in test]) +
                                       "\n")

    # All tests in txt:
    tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
                     "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-pdr-4t4c-full.txt".format(table["output-file"])
                     ]

    # Re-read each csv just written and render it with prettytable.
    # ('rb' mode for csv.reader is the Python-2 convention.)
    for i, txt_name in enumerate(tbl_names_txt):
        txt_table = None
        logging.info("      Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                # First row (the header) creates the table.
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                else:
                    txt_table.add_row(row)
            # NOTE(review): raises AttributeError if the csv was empty
            # (txt_table still None) — presumably never happens since the
            # header line is always written; confirm.
            txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))

    # Selected tests in csv: top/bottom nr-of-tests-shown rows of the
    # 1t1c full tables (sorted by change, so top = biggest improvements).
    input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
        lines = list()
        for line in in_file:
            lines.append(line)

    output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        # lines[1:] skips the header line of the full table.
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        # lines[-1:0:-1] iterates the data lines in reverse, header excluded.
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
        lines = list()
        for line in in_file:
            lines.append(line)

    output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)
582
583
def table_performance_comparison_mrr(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison_mrr
    specified in the specification file.

    MRR variant of table_performance_comparison: reads values from
    tst_data["result"]["throughput"] instead of the NDR/PDR throughput,
    has no history columns and writes per-core .csv and .txt tables only.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case",
                  "{0} Throughput [Mpps]".format(table["reference"]["title"]),
                  "{0} stdev [Mpps]".format(table["reference"]["title"]),
                  "{0} Throughput [Mpps]".format(table["compare"]["title"]),
                  "{0} stdev [Mpps]".format(table["compare"]["title"]),
                  "Change [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    # tbl_dict maps test name -> {"name", "ref-data", "cmp-data"}.
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    # Display name: first segment of the parent suite +
                    # the test name without its own first segment.
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name,
                                          "ref-data": list(),
                                          "cmp-data": list()}
                try:
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["result"]["throughput"])
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                try:
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["result"]["throughput"])
                except KeyError:
                    # Test present only in the compare build; skip it.
                    pass
                except TypeError:
                    # No usable compare data; drop the test entirely.
                    tbl_dict.pop(tst_name, None)

    # Build the rows: name, ref mean/stdev, cmp mean/stdev, change [%].
    # Values are scaled to Mpps (/ 1000000).
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if tbl_dict[tst_name]["ref-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        if tbl_dict[tst_name]["cmp-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        # item[1] is the reference mean, item[3] the compare mean.
        if item[1] is not None and item[3] is not None and item[1] != 0:
            item.append(int(relative_change(float(item[1]), float(item[3]))))
        # Keep only complete rows (all 6 columns incl. the change).
        if len(item) == 6:
            tbl_lst.append(item)

    # Sort the table according to the relative change, descending.
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate tables:
    # All tests in csv:
    tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-2t2c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-4t4c-full{1}".format(table["output-file"],
                                           table["output-file-ext"])
                 ]
    for file_name in tbl_names:
        logging.info("      Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            for test in tbl_lst:
                if file_name.split("-")[-2] in test[0]:  # cores
                    # NOTE: this strips the last name segment IN PLACE, so
                    # the row is changed for all later files in this loop —
                    # the iteration order of tbl_names matters here.
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item) for item in test]) +
                                       "\n")

    # All tests in txt:
    tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
                     "{0}-2t2c-full.txt".format(table["output-file"]),
                     "{0}-4t4c-full.txt".format(table["output-file"])
                     ]

    # Re-read each csv just written and render it with prettytable.
    # ('rb' mode for csv.reader is the Python-2 convention.)
    for i, txt_name in enumerate(tbl_names_txt):
        txt_table = None
        logging.info("      Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                # First row (the header) creates the table.
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                else:
                    txt_table.add_row(row)
            txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))
714
715
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard specified in the specification file.

    For every test (except those in the ignore list) the throughput samples
    are classified per build as regression / progression / outlier / normal
    using a rolling median +/- 3 * rolling stdev band, and the short- and
    long-term relative changes of the trend are computed.  The result is
    written as a csv table and a pretty-printed txt table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["        Test Case",
              "Trend [Mpps]",
              "  Short-Term   Change [%]",
              "  Long-Term   Change [%]",
              "  Regressions [#]",
              "  Progressions [#]",
              "  Outliers [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    # tbl_dict maps test name -> {"name": display name,
    #                             "data": {build nr: throughput}}
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Tests listed in the ignore list are excluded from the
                # dashboard:
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name,
                                          "data": dict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] =  \
                        tst_data["result"]["throughput"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        # At least three samples are needed to evaluate a trend:
        if len(tbl_dict[tst_name]["data"]) > 2:

            pd_data = pd.Series(tbl_dict[tst_name]["data"])
            last_key = pd_data.keys()[-1]
            win_size = min(pd_data.size, table["window"])
            win_first_idx = pd_data.size - win_size
            key_14 = pd_data.keys()[win_first_idx]
            long_win_size = min(pd_data.size, table["long-trend-window"])

            # Remove outliers before computing the trend:
            data_t, _ = split_outliers(pd_data, outlier_const=1.5,
                                       window=win_size)

            median_t = data_t.rolling(window=win_size, min_periods=2).median()
            stdev_t = data_t.rolling(window=win_size, min_periods=2).std()
            median_first_idx = pd_data.size - long_win_size
            # Long-term reference: maximum of the rolling medians within the
            # long trend window, excluding the current short window:
            try:
                max_median = max(
                    [x for x in median_t.values[median_first_idx:-win_size]
                     if not isnan(x)])
            except ValueError:
                max_median = nan
            try:
                last_median_t = median_t[last_key]
            except KeyError:
                last_median_t = nan
            try:
                median_t_14 = median_t[key_14]
            except KeyError:
                median_t_14 = nan

            # Test name:
            name = tbl_dict[tst_name]["name"]

            # Classification list: one verdict per build, based on the
            # median +/- 3 * stdev band of the trend:
            classification_lst = list()
            for build_nr, value in pd_data.iteritems():

                if isnan(data_t[build_nr]) \
                        or isnan(median_t[build_nr]) \
                        or isnan(stdev_t[build_nr]) \
                        or isnan(value):
                    classification_lst.append("outlier")
                elif value < (median_t[build_nr] - 3 * stdev_t[build_nr]):
                    classification_lst.append("regression")
                elif value > (median_t[build_nr] + 3 * stdev_t[build_nr]):
                    classification_lst.append("progression")
                else:
                    classification_lst.append("normal")

            # Short-term change: last median vs median at the start of the
            # short window:
            if isnan(last_median_t) or isnan(median_t_14) or median_t_14 == 0.0:
                rel_change_last = nan
            else:
                rel_change_last = round(
                    ((last_median_t - median_t_14) / median_t_14) * 100, 2)

            # Long-term change: last median vs long-term maximum median:
            if isnan(max_median) or isnan(last_median_t) or max_median == 0.0:
                rel_change_long = nan
            else:
                rel_change_long = round(
                    ((last_median_t - max_median) / max_median) * 100, 2)

            tbl_lst.append(
                [name,
                 '-' if isnan(last_median_t) else
                 round(last_median_t / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
                 classification_lst[win_first_idx:].count("regression"),
                 classification_lst[win_first_idx:].count("progression"),
                 classification_lst[win_first_idx:].count("outlier")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Sort: most regressions first, then most progressions, then most
    # outliers; ties broken by ascending short-term change:
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_pro = [item for item in tbl_reg if item[5] == nrp]
            for nro in range(table["window"], -1, -1):
                tbl_out = [item for item in tbl_pro if item[6] == nro]
                tbl_out.sort(key=lambda rel: rel[2])
                tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("      Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    # Render the csv table also as a pretty-printed txt table:
    txt_file_name = "{0}.txt".format(table["output-file"])
    txt_table = None
    logging.info("      Writing file: '{0}'".format(txt_file_name))
    with open(file_name, 'rb') as csv_file:
        csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
        for row in csv_content:
            if txt_table is None:
                txt_table = prettytable.PrettyTable(row)
            else:
                txt_table.add_row(row)
        txt_table.align["Test case"] = "l"
    with open(txt_file_name, "w") as txt_file:
        txt_file.write(str(txt_table))
871
872
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the csv dashboard produced by table_performance_trending_dashboard
    and renders it as an html table with rows colored by classification and
    test names linked to the matching trending graph anchors.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows:
    # Each classification has a (light, dark) pair for alternating rows:
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "outlier": ("#e6e6e6", "#cccccc"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        # Color priority: regression > progression > outlier > normal
        # (columns 4..6 hold the respective counts):
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        elif int(row[6]):
            color = "outlier"
        else:
            color = "normal"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name:
            url = "../trending/"
            file_name = ""
            anchor = "#"
            feature = ""
            if c_idx == 0:
                # Map the test name to the trending page it is plotted on:
                if "memif" in item:
                    file_name = "container_memif.html"

                elif "vhost" in item:
                    if "l2xcbase" in item or "l2bdbasemaclrn" in item:
                        file_name = "vm_vhost_l2.html"
                    elif "ip4base" in item:
                        file_name = "vm_vhost_ip4.html"

                elif "ipsec" in item:
                    file_name = "ipsec.html"

                elif "ethip4lispip" in item or "ethip4vxlan" in item:
                    file_name = "ip4_tunnels.html"

                elif "ip4base" in item or "ip4scale" in item:
                    file_name = "ip4.html"
                    if "iacl" in item or "snat" in item or "cop" in item:
                        feature = "-features"

                elif "ip6base" in item or "ip6scale" in item:
                    file_name = "ip6.html"

                elif "l2xcbase" in item or "l2xcscale" in item \
                        or "l2bdbasemaclrn" in item or "l2bdscale" in item \
                        or "l2dbbasemaclrn" in item or "l2dbscale" in item:
                    file_name = "l2.html"
                    if "iacl" in item:
                        feature = "-features"

                # Anchor: NIC type, frame size and thread/core combination,
                # e.g. "#x520-64b-1t1c":
                if "x520" in item:
                    anchor += "x520-"
                elif "x710" in item:
                    anchor += "x710-"
                elif "xl710" in item:
                    anchor += "xl710-"

                if "64b" in item:
                    anchor += "64b-"
                elif "78b" in item:
                    # Bug fix: trailing '-' was missing, which produced
                    # malformed anchors like "78b1t1c" for IPv6 tests.
                    anchor += "78b-"
                elif "imix" in item:
                    anchor += "imix-"
                elif "9000b" in item:
                    anchor += "9000b-"
                elif "1518" in item:
                    anchor += "1518b-"

                if "1t1c" in item:
                    anchor += "1t1c"
                elif "2t2c" in item:
                    anchor += "2t2c"
                elif "4t4c" in item:
                    anchor += "4t4c"

                url = url + file_name + anchor + feature

                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item

            if c_idx > 0:
                td.text = item

    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("      Writing file: '{0}'".
                         format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return