# CSIT-1041: Trending dashboard
# [csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2017 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import prettytable
21 import pandas as pd
22
23 from string import replace
24 from math import isnan
25 from collections import OrderedDict
26 from numpy import nan
27 from xml.etree import ElementTree as ET
28
29 from errors import PresentationError
30 from utils import mean, stdev, relative_change, remove_outliers, split_outliers
31
32
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table item in the specification names the generator function to
    run (key "algorithm"); the function must be defined in this module.
    Unknown algorithm names are logged and skipped; exceptions raised by
    the algorithm itself propagate to the caller, as before.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        # Resolve the algorithm by name in the module namespace instead of
        # eval() so an arbitrary expression in the specification file
        # cannot be executed.
        algorithm = globals().get(table["algorithm"])
        if algorithm is None:
            logging.error("The algorithm '{0}' is not defined.".
                          format(table["algorithm"]))
            continue
        algorithm(table, data)
    logging.info("Done.")
50
51
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table)

    # CSV header: each column title double-quoted, with embedded double
    # quotes escaped by doubling them.
    header = ['"{0}"'.format(str(col["title"]).replace('"', '""'))
              for col in table["columns"]]

    # Only the first job and its first build listed in the specification
    # are used as the data source for this table.
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.iteritems():
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            # Keep only the tests belonging to this suite.
            if data[job][build][test]["parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table["columns"]:
                data_key = column["data"].split(" ")[1]
                try:
                    col_data = str(data[job][build][test][data_key]).\
                        replace('"', '""')
                    if data_key in ("vat-history", "show-run"):
                        # Drop the first " |br| " separator and wrap the
                        # rest as preformatted text.
                        col_data = col_data.replace(" |br| ", "", 1)
                        col_data = " |prein| {0} |preout| ".\
                            format(col_data[:-5])
                    row_lst.append('"{0}"'.format(col_data))
                except KeyError:
                    row_lst.append("No data")
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for row in table_lst:
                    file_handler.write(",".join(row) + "\n")

    logging.info("  Done.")
116
117
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Merge the filtered test data from all builds into a single view,
    # sorted for a stable row order.
    data = input_data.merge_data(input_data.filter_data(table))
    data.sort_index(inplace=True)

    suites = input_data.merge_data(
        input_data.filter_data(table, data_set="suites"))

    # CSV header: each column title double-quoted, with embedded double
    # quotes escaped by doubling them.
    header = ['"{0}"'.format(str(col["title"]).replace('"', '""'))
              for col in table["columns"]]

    for _, suite in suites.iteritems():
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            # Keep only the tests belonging to this suite.
            if data[test]["parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table["columns"]:
                data_key = column["data"].split(" ")[1]
                try:
                    col_data = str(data[test][data_key]).replace('"', '""')
                    if data_key in ("vat-history", "show-run"):
                        # Drop the first " |br| " separator and wrap the
                        # rest as preformatted text.
                        col_data = col_data.replace(" |br| ", "", 1)
                        col_data = " |prein| {0} |preout| ".\
                            format(col_data[:-5])
                    row_lst.append('"{0}"'.format(col_data))
                except KeyError:
                    row_lst.append("No data")
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for row in table_lst:
                    file_handler.write(",".join(row) + "\n")

    logging.info("  Done.")
177
178
def table_performance_improvements(table, input_data):
    """Generate the table(s) with algorithm: table_performance_improvements
    specified in the specification file.

    The table rows come from a .csv template; every column in the
    specification is one of three commands:
    - "template N": copy cell N of the template row,
    - "data JOB... OP": aggregate the throughput of the listed jobs with OP,
    - "operation OP I J": apply OP to two previously computed cells.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    def _write_line_to_file(file_handler, data):
        """Write a line to the .csv file.

        :param file_handler: File handler for the csv file. It must be open for
         writing text.
        :param data: Item to be written to the file.
        :type file_handler: BinaryIO
        :type data: list
        """

        line_lst = list()
        for item in data:
            if isinstance(item["data"], str):
                # Remove -?drdisc from the end
                if item["data"].endswith("drdisc"):
                    item["data"] = item["data"][:-8]
                line_lst.append(item["data"])
            elif isinstance(item["data"], float):
                line_lst.append("{:.1f}".format(item["data"]))
            elif item["data"] is None:
                line_lst.append("")
        file_handler.write(",".join(line_lst) + "\n")

    def _resolve_operation(name):
        """Look an operation up by name in the module namespace.

        Used instead of eval() so the specification file cannot inject
        arbitrary expressions; only callables defined or imported in this
        module (e.g. mean, relative_change) can be referenced.

        :param name: Name of the operation.
        :type name: str
        :returns: The callable, or None if it is not defined / not callable.
        """
        operation = globals().get(name)
        return operation if callable(operation) else None

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the template
    file_name = table.get("template", None)
    if file_name:
        try:
            tmpl = _read_csv_template(file_name)
        except PresentationError:
            logging.error("  The template '{0}' does not exist. Skipping the "
                          "table.".format(file_name))
            return None
    else:
        logging.error("The template is not defined. Skipping the table.")
        return None

    # Transform the data
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = [column["title"] for column in table["columns"]]

    # Generate the data for the table according to the model in the table
    # specification
    tbl_lst = list()
    for tmpl_item in tmpl:
        tbl_item = list()
        for column in table["columns"]:
            cmd = column["data"].split(" ")[0]
            args = column["data"].split(" ")[1:]
            if cmd == "template":
                # Copy a cell straight from the template row, converted to
                # float when possible.
                try:
                    val = float(tmpl_item[int(args[0])])
                except ValueError:
                    val = tmpl_item[int(args[0])]
                tbl_item.append({"data": val})
            elif cmd == "data":
                # Aggregate the throughput over all listed jobs with the
                # operation given as the last argument, scaled to millions.
                jobs = args[0:-1]
                operation = _resolve_operation(args[-1])
                if operation is None:
                    logging.error("Not supported operation {0}. Skipping the "
                                  "table.".format(args[-1]))
                    return None
                data_lst = list()
                for job in jobs:
                    for build in data[job]:
                        try:
                            data_lst.append(float(build[tmpl_item[0]]
                                                  ["throughput"]["value"]))
                        except (KeyError, TypeError):
                            # No data, ignore
                            continue
                if data_lst:
                    tbl_item.append({"data": operation(data_lst) / 1000000})
                else:
                    tbl_item.append({"data": None})
            elif cmd == "operation":
                # Derived column computed from two earlier cells, e.g. the
                # relative change between reference and compare data.
                operation = _resolve_operation(args[0])
                if operation is None:
                    logging.error("Not supported operation {0}. Skipping the "
                                  "table.".format(args[0]))
                    return None
                try:
                    nr1 = float(tbl_item[int(args[1])]["data"])
                    nr2 = float(tbl_item[int(args[2])]["data"])
                    if nr1 and nr2:
                        tbl_item.append({"data": operation(nr1, nr2)})
                    else:
                        tbl_item.append({"data": None})
                except (IndexError, ValueError, TypeError):
                    logging.error("No data for {0}".format(tbl_item[0]["data"]))
                    tbl_item.append({"data": None})
                    continue
            else:
                logging.error("Not supported command {0}. Skipping the table.".
                              format(cmd))
                return None
        tbl_lst.append(tbl_item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)

    # Create the tables and write them to the files
    file_names = [
        "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])
    ]

    for file_name in file_names:
        logging.info("    Writing the file '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in tbl_lst:
                if isinstance(item[-1]["data"], float):
                    rel_change = round(item[-1]["data"], 1)
                else:
                    rel_change = item[-1]["data"]
                # "top" files get rows whose relative change is at least
                # 10 %, "low" files get the rest; "ndr"/"pdr" select rows by
                # the test name in the first column.
                if "ndr_top" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_top" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "ndr_low" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_low" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)

    logging.info("  Done.")
324
325
326 def _read_csv_template(file_name):
327     """Read the template from a .csv file.
328
329     :param file_name: Name / full path / relative path of the file to read.
330     :type file_name: str
331     :returns: Data from the template as list (lines) of lists (items on line).
332     :rtype: list
333     :raises: PresentationError if it is not possible to read the file.
334     """
335
336     try:
337         with open(file_name, 'r') as csv_file:
338             tmpl_data = list()
339             for line in csv_file:
340                 tmpl_data.append(line[:-1].split(","))
341         return tmpl_data
342     except IOError as err:
343         raise PresentationError(str(err), level="ERROR")
344
345
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Builds a "reference vs compare" throughput table (optionally with extra
    historical columns), writes per-core csv and txt variants, and top /
    bottom excerpts of the 1t1c tables.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables.  When "history" items are present,
    # a Throughput/Stdev column pair is inserted for each of them before
    # the reference/compare columns.
    try:
        header = ["Test case", ]

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} Throughput [Mpps]".format(item["title"]),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} Throughput [Mpps]".format(table["compare"]["title"]),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Change [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    # tbl_dict maps test name -> {"name", "ref-data", "cmp-data"[, "history"]}.
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    # Displayed name: first token of the parent suite plus
                    # the test name without its first token.
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name,
                                          "ref-data": list(),
                                          "cmp-data": list()}
                try:
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["throughput"]["value"])
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                try:
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["throughput"]["value"])
                except KeyError:
                    # The test is not in the reference set; skip it.
                    pass
                except TypeError:
                    # Broken throughput record: drop the whole test.
                    tbl_dict.pop(tst_name, None)
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        if tbl_dict.get(tst_name, None) is None:
                            continue
                        if tbl_dict[tst_name].get("history", None) is None:
                            tbl_dict[tst_name]["history"] = OrderedDict()
                        if tbl_dict[tst_name]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name]["history"][item["title"]] = \
                                list()
                        try:
                            tbl_dict[tst_name]["history"][item["title"]].\
                                append(tst_data["throughput"]["value"])
                        except (TypeError, KeyError):
                            pass

    # Turn the dict into rows: mean/stdev in Mpps (rounded to 2 places)
    # for every column pair, then the relative change in percent.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            # NOTE(review): a test present in ref data but absent from all
            # history builds has no "history" key here and would raise
            # KeyError — confirm the input always provides history data.
            for hist_list in tbl_dict[tst_name]["history"].values():
                for hist_data in hist_list:
                    if hist_data:
                        data_t = remove_outliers(
                            hist_data, outlier_const=table["outlier-const"])
                        if data_t:
                            item.append(round(mean(data_t) / 1000000, 2))
                            item.append(round(stdev(data_t) / 1000000, 2))
                        else:
                            item.extend([None, None])
                    else:
                        item.extend([None, None])
        if tbl_dict[tst_name]["ref-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        if tbl_dict[tst_name]["cmp-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        # item[-5] is the reference mean, item[-3] the compare mean (the
        # change column is not appended yet).
        if item[-5] is not None and item[-3] is not None and item[-5] != 0:
            item.append(int(relative_change(float(item[-5]), float(item[-3]))))
        # Rows without a computed change are shorter than the header and
        # are silently dropped here.
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate tables:
    # All tests in csv:
    tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
                 ]
    for file_name in tbl_names:
        logging.info("      Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            for test in tbl_lst:
                # Rows are routed to a file by matching the ndr/pdr token
                # and the core-count token from the file name against the
                # test name; the matched core suffix is then stripped.
                if (file_name.split("-")[-3] in test[0] and    # NDR vs PDR
                        file_name.split("-")[-2] in test[0]):  # cores
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item) for item in test]) +
                                       "\n")

    # All tests in txt: render each csv just written into an aligned
    # prettytable text file.
    tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
                     "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-pdr-4t4c-full.txt".format(table["output-file"])
                     ]

    for i, txt_name in enumerate(tbl_names_txt):
        txt_table = None
        logging.info("      Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            # The first csv row becomes the prettytable header.
            for row in csv_content:
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                else:
                    txt_table.add_row(row)
            # NOTE(review): an empty csv would leave txt_table as None and
            # raise AttributeError here.
            txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))

    # Selected tests in csv: the first nr-of-tests-shown rows of the sorted
    # ndr 1t1c table (biggest improvements) ...
    input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
        lines = list()
        for line in in_file:
            lines.append(line)

    output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    # ... and the last rows in reverse order (biggest regressions).
    output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    # Same top/bottom excerpts for the pdr 1t1c table.
    input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
        lines = list()
        for line in in_file:
            lines.append(line)

    output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)
580
581
def table_performance_comparison_mrr(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison_mrr
    specified in the specification file.

    Like table_performance_comparison, but reads MRR results
    (tst_data["result"]["throughput"]) and has no history columns; writes
    per-core csv and txt tables.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case",
                  "{0} Throughput [Mpps]".format(table["reference"]["title"]),
                  "{0} stdev [Mpps]".format(table["reference"]["title"]),
                  "{0} Throughput [Mpps]".format(table["compare"]["title"]),
                  "{0} stdev [Mpps]".format(table["compare"]["title"]),
                  "Change [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    # tbl_dict maps test name -> {"name", "ref-data", "cmp-data"}.
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    # Displayed name: first token of the parent suite plus
                    # the test name without its first token.
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name,
                                          "ref-data": list(),
                                          "cmp-data": list()}
                try:
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["result"]["throughput"])
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                try:
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["result"]["throughput"])
                except KeyError:
                    # The test is not in the reference set; skip it.
                    pass
                except TypeError:
                    # Broken result record: drop the whole test.
                    tbl_dict.pop(tst_name, None)

    # Turn the dict into rows: mean/stdev in Mpps (rounded to 2 places)
    # for both column pairs, then the relative change in percent.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if tbl_dict[tst_name]["ref-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        if tbl_dict[tst_name]["cmp-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        # item[1] is the reference mean, item[3] the compare mean; rows
        # without a computed change (len != 6) are silently dropped.
        if item[1] is not None and item[3] is not None and item[1] != 0:
            item.append(int(relative_change(float(item[1]), float(item[3]))))
        if len(item) == 6:
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate tables:
    # All tests in csv:
    tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-2t2c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-4t4c-full{1}".format(table["output-file"],
                                           table["output-file-ext"])
                 ]
    for file_name in tbl_names:
        logging.info("      Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            for test in tbl_lst:
                # Rows are routed to a file by matching the core-count
                # token of the file name against the test name; the matched
                # core suffix is then stripped.
                if file_name.split("-")[-2] in test[0]:  # cores
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item) for item in test]) +
                                       "\n")

    # All tests in txt: render each csv just written into an aligned
    # prettytable text file.
    tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
                     "{0}-2t2c-full.txt".format(table["output-file"]),
                     "{0}-4t4c-full.txt".format(table["output-file"])
                     ]

    for i, txt_name in enumerate(tbl_names_txt):
        txt_table = None
        logging.info("      Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            # The first csv row becomes the prettytable header.
            for row in csv_content:
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                else:
                    txt_table.add_row(row)
            # NOTE(review): an empty csv would leave txt_table as None and
            # raise AttributeError here.
            txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))
712
713
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard specified in the specification file.

    For every test with more than two samples, compute the trend (rolling
    median with outliers removed), its short-term and long-term relative
    changes, and the number of regressions / progressions / outliers within
    the evaluation window.  The result is written as a csv file and as a
    pretty-printed txt table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]",
              "Outliers [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table; collect throughput per build for each test.
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name,
                                          "data": dict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["throughput"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        # At least three samples are needed for a meaningful trend.
        if len(tbl_dict[tst_name]["data"]) > 2:

            pd_data = pd.Series(tbl_dict[tst_name]["data"])
            last_key = pd_data.keys()[-1]
            win_size = min(pd_data.size, table["window"])
            win_first_idx = pd_data.size - win_size
            key_14 = pd_data.keys()[win_first_idx]
            long_win_size = min(pd_data.size, table["long-trend-window"])

            # Drop outliers before computing the trend.
            data_t, _ = split_outliers(pd_data, outlier_const=1.5,
                                       window=win_size)

            median_t = data_t.rolling(window=win_size, min_periods=2).median()
            stdev_t = data_t.rolling(window=win_size, min_periods=2).std()
            median_first_idx = pd_data.size - long_win_size
            try:
                max_median = max([x for x in median_t.values[median_first_idx:]
                                  if not isnan(x)])
            except ValueError:
                max_median = nan
            try:
                last_median_t = median_t[last_key]
            except KeyError:
                last_median_t = nan
            try:
                median_t_14 = median_t[key_14]
            except KeyError:
                median_t_14 = nan

            # Test name:
            name = tbl_dict[tst_name]["name"]

            # Per-test data-series dumps are for debugging only; keep them
            # out of the info-level log.
            logging.debug("{}".format(name))
            logging.debug("pd_data : {}".format(pd_data))
            logging.debug("data_t : {}".format(data_t))
            logging.debug("median_t : {}".format(median_t))
            logging.debug("last_median_t : {}".format(last_median_t))
            logging.debug("median_t_14 : {}".format(median_t_14))
            logging.debug("max_median : {}".format(max_median))

            # Classification list: one verdict per build, comparing each
            # sample against median +/- 2 * stdev of the trend.
            classification_lst = list()
            for build_nr, value in pd_data.iteritems():

                if isnan(data_t[build_nr]) \
                        or isnan(median_t[build_nr]) \
                        or isnan(stdev_t[build_nr]) \
                        or isnan(value):
                    classification_lst.append("outlier")
                elif value < (median_t[build_nr] - 2 * stdev_t[build_nr]):
                    classification_lst.append("regression")
                elif value > (median_t[build_nr] + 2 * stdev_t[build_nr]):
                    classification_lst.append("progression")
                else:
                    classification_lst.append("normal")

            if isnan(last_median_t) or isnan(median_t_14) or median_t_14 == 0.0:
                rel_change_last = nan
            else:
                rel_change_last = round(
                    ((last_median_t - median_t_14) / median_t_14) * 100, 2)

            if isnan(max_median) or isnan(last_median_t) or max_median == 0.0:
                rel_change_long = nan
            else:
                rel_change_long = round(
                    ((last_median_t - max_median) / max_median) * 100, 2)

            logging.debug("rel_change_last : {}".format(rel_change_last))
            logging.debug("rel_change_long : {}".format(rel_change_long))

            tbl_lst.append(
                [name,
                 '-' if isnan(last_median_t) else
                 round(last_median_t / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
                 classification_lst[win_first_idx:].count("regression"),
                 classification_lst[win_first_idx:].count("progression"),
                 classification_lst[win_first_idx:].count("outlier")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Sort: most regressions first, then most progressions, then most
    # outliers; ties are broken by the short-term change (ascending).
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_pro = [item for item in tbl_reg if item[5] == nrp]
            for nro in range(table["window"], -1, -1):
                tbl_out = [item for item in tbl_pro if item[6] == nro]
                tbl_out.sort(key=lambda rel: rel[2])
                tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("      Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    txt_table = None
    logging.info("      Writing file: '{0}'".format(txt_file_name))
    # Text mode works for csv on both Python 2 and 3 (binary mode breaks
    # csv.reader on Python 3).
    with open(file_name, 'r') as csv_file:
        csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
        for row in csv_content:
            if txt_table is None:
                txt_table = prettytable.PrettyTable(row)
            else:
                txt_table.add_row(row)
        # Fix: the first header item is "Test Case"; the old key
        # "Test case" never matched, so the left alignment was silently
        # ignored.  Also guard against an empty csv file.
        if txt_table is not None:
            txt_table.align["Test Case"] = "l"
    with open(txt_file_name, "w") as txt_file:
        txt_file.write(str(txt_table))
874
875
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the dashboard csv file (table["input-file"]) and renders it as an
    HTML table embedded in reStructuredText (table["output-file"]).  The
    first column (test name) is turned into a link pointing to the matching
    trending graph page and anchor.

    :param table: Table to generate.
    :param input_data: Data to process (unused here; kept for a uniform
        algorithm interface).
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        # Text mode works for csv on both Python 2 and 3 (binary mode
        # breaks csv.reader on Python 3).
        with open(table["input-file"], 'r') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows:
    for r_idx, row in enumerate(csv_lst[1:]):
        background = "#D4E4F7" if r_idx % 2 else "white"
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            if c_idx == 0:
                # Test name: build the url to the trending graph from the
                # test name components (test suite -> page, NIC / frame
                # size / thread count -> anchor).
                url = "../trending/"
                file_name = ""
                anchor = "#"
                feature = ""
                if "memif" in item:
                    file_name = "container_memif.html"

                elif "vhost" in item:
                    if "l2xcbase" in item or "l2bdbasemaclrn" in item:
                        file_name = "vm_vhost_l2.html"
                    elif "ip4base" in item:
                        file_name = "vm_vhost_ip4.html"

                elif "ipsec" in item:
                    file_name = "ipsec.html"

                elif "ethip4lispip" in item or "ethip4vxlan" in item:
                    file_name = "ip4_tunnels.html"

                elif "ip4base" in item or "ip4scale" in item:
                    file_name = "ip4.html"
                    if "iacl" in item or "snat" in item or "cop" in item:
                        feature = "-features"

                elif "ip6base" in item or "ip6scale" in item:
                    file_name = "ip6.html"

                # NOTE(review): "l2db..." below looks like a typo of
                # "l2bd..." (already matched one line up) - confirm
                # against the test naming scheme before removing.
                elif "l2xcbase" in item or "l2xcscale" in item \
                        or "l2bdbasemaclrn" in item or "l2bdscale" in item \
                        or "l2dbbasemaclrn" in item or "l2dbscale" in item:
                    file_name = "l2.html"
                    if "iacl" in item:
                        feature = "-features"

                if "x520" in item:
                    anchor += "x520-"
                elif "x710" in item:
                    anchor += "x710-"
                elif "xl710" in item:
                    anchor += "xl710-"

                if "64b" in item:
                    anchor += "64b-"
                elif "78b" in item:
                    # Fix: trailing dash was missing, producing broken
                    # anchors like "#x520-78b1t1c".
                    anchor += "78b-"
                elif "imix" in item:
                    anchor += "imix-"
                elif "9000b" in item:
                    anchor += "9000b-"
                elif "1518" in item:
                    anchor += "1518b-"

                if "1t1c" in item:
                    anchor += "1t1c"
                elif "2t2c" in item:
                    anchor += "2t2c"
                elif "4t4c" in item:
                    anchor += "4t4c"

                url = url + file_name + anchor + feature

                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item

    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("      Writing file: '{0}'.".
                         format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            # ET.tostring() returns bytes on Python 3 (str on Python 2);
            # decode before writing to the text-mode file handle.
            dashboard_txt = ET.tostring(dashboard)
            if isinstance(dashboard_txt, bytes):
                dashboard_txt = dashboard_txt.decode("utf-8")
            html_file.write(dashboard_txt)
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return