CSIT-1041: Trending dashboard
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2017 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import prettytable
21 import pandas as pd
22
23 from string import replace
24 from math import isnan
25 from numpy import nan
26 from xml.etree import ElementTree as ET
27
28 from errors import PresentationError
29 from utils import mean, stdev, relative_change, remove_outliers, split_outliers
30
31
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        # Resolve the algorithm by name in the module namespace instead of
        # eval(): eval() on spec-provided text is unsafe, and catching
        # NameError around the *call* also swallowed NameErrors raised
        # inside the algorithm itself, mis-reporting them as "not defined".
        generator = globals().get(table["algorithm"], None)
        if generator is None:
            logging.error("The algorithm '{0}' is not defined.".
                          format(table["algorithm"]))
            continue
        generator(table, data)
    logging.info("Done.")
49
50
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table)

    # Prepare the header of the tables: titles are csv-quoted, embedded
    # double quotes escaped by doubling.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification. Only the first job and its first build are used.
    # NOTE: list(...) instead of .keys()[0] and .items() instead of
    # .iteritems() keep identical behavior while not being Python-2-only.
    job = list(table["data"].keys())[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for _, suite in suites.items():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[job][build][test][
                            column["data"].split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # Drop the first " |br| " marker and the last
                            # 5 characters, then wrap the text in the
                            # |prein| / |preout| directives.
                            # str.replace(old, new, 1) is the portable
                            # equivalent of the Py2-only string.replace().
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
115
116
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables: titles are csv-quoted, embedded
    # double quotes escaped by doubling.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.items():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[test][
                            column["data"].split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # Drop the first " |br| " marker and the last
                            # 5 characters, then wrap the text in the
                            # |prein| / |preout| directives.
                            # str.replace(old, new, 1) is the portable
                            # equivalent of the Py2-only string.replace().
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
176
177
def table_performance_improvements(table, input_data):
    """Generate the table(s) with algorithm: table_performance_improvements
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    def _write_line_to_file(file_handler, data):
        """Write a line to the .csv file.

        :param file_handler: File handler for the csv file. It must be open
            for writing text.
        :param data: Item to be written to the file.
        :type file_handler: BinaryIO
        :type data: list
        """

        fields = list()
        for item in data:
            value = item["data"]
            if isinstance(value, str):
                # Remove -?drdisc from the end
                if value.endswith("drdisc"):
                    value = value[:-8]
                    item["data"] = value
                fields.append(value)
            elif isinstance(value, float):
                fields.append("{:.1f}".format(value))
            elif value is None:
                fields.append("")
        file_handler.write(",".join(fields) + "\n")

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the template; without it the table cannot be built.
    file_name = table.get("template", None)
    if not file_name:
        logging.error("The template is not defined. Skipping the table.")
        return None
    try:
        tmpl = _read_csv_template(file_name)
    except PresentationError:
        logging.error("  The template '{0}' does not exist. Skipping the "
                      "table.".format(file_name))
        return None

    # Transform the data
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = [column["title"] for column in table["columns"]]

    # Generate the data for the table according to the model in the table
    # specification.
    tbl_lst = list()
    for tmpl_item in tmpl:
        tbl_item = list()
        for column in table["columns"]:
            tokens = column["data"].split(" ")
            cmd = tokens[0]
            args = tokens[1:]
            if cmd == "template":
                # Take the value directly from the template line.
                try:
                    val = float(tmpl_item[int(args[0])])
                except ValueError:
                    val = tmpl_item[int(args[0])]
                tbl_item.append({"data": val})
            elif cmd == "data":
                # Collect throughput values from the listed jobs and reduce
                # them with the named operation.
                jobs = args[0:-1]
                operation = args[-1]
                data_lst = list()
                for job in jobs:
                    for build in data[job]:
                        try:
                            data_lst.append(float(
                                build[tmpl_item[0]]["throughput"]["value"]))
                        except (KeyError, TypeError):
                            # No data, ignore
                            continue
                if data_lst:
                    # SECURITY NOTE: eval() resolves the operation name
                    # (e.g. "mean") coming from the specification file;
                    # the specification must be trusted input.
                    tbl_item.append({"data": (eval(operation)(data_lst)) /
                                             1000000})
                else:
                    tbl_item.append({"data": None})
            elif cmd == "operation":
                # Combine two previously computed columns.
                operation = args[0]
                try:
                    nr1 = float(tbl_item[int(args[1])]["data"])
                    nr2 = float(tbl_item[int(args[2])]["data"])
                    if nr1 and nr2:
                        # SECURITY NOTE: eval() on spec-provided name, see
                        # above.
                        tbl_item.append({"data": eval(operation)(nr1, nr2)})
                    else:
                        tbl_item.append({"data": None})
                except (IndexError, ValueError, TypeError):
                    logging.error("No data for {0}".
                                  format(tbl_item[0]["data"]))
                    tbl_item.append({"data": None})
                    continue
            else:
                logging.error("Not supported command {0}. Skipping the "
                              "table.".format(cmd))
                return None
        tbl_lst.append(tbl_item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)

    # Create the tables and write them to the files
    file_names = ["{0}_{1}_{2}{3}".format(table["output-file"], prefix, part,
                                          table["output-file-ext"])
                  for part in ("top", "low")
                  for prefix in ("ndr", "pdr")]

    for file_name in file_names:
        logging.info("    Writing the file '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in tbl_lst:
                rel_change = item[-1]["data"]
                if isinstance(rel_change, float):
                    rel_change = round(rel_change, 1)
                test_name = item[0]["data"]
                if "ndr_top" in file_name \
                        and "ndr" in test_name \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_top" in file_name \
                        and "pdr" in test_name \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "ndr_low" in file_name \
                        and "ndr" in test_name \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_low" in file_name \
                        and "pdr" in test_name \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)

    logging.info("  Done.")
323
324
325 def _read_csv_template(file_name):
326     """Read the template from a .csv file.
327
328     :param file_name: Name / full path / relative path of the file to read.
329     :type file_name: str
330     :returns: Data from the template as list (lines) of lists (items on line).
331     :rtype: list
332     :raises: PresentationError if it is not possible to read the file.
333     """
334
335     try:
336         with open(file_name, 'r') as csv_file:
337             tmpl_data = list()
338             for line in csv_file:
339                 tmpl_data.append(line[:-1].split(","))
340         return tmpl_data
341     except IOError as err:
342         raise PresentationError(str(err), level="ERROR")
343
344
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case",
                  "{0} Throughput [Mpps]".format(table["reference"]["title"]),
                  "{0} stdev [Mpps]".format(table["reference"]["title"]),
                  "{0} Throughput [Mpps]".format(table["compare"]["title"]),
                  "{0} stdev [Mpps]".format(table["compare"]["title"]),
                  "Change [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table.
    # NOTE: .items() instead of the Python-2-only .iteritems(); identical
    # behavior.
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name,
                                          "ref-data": list(),
                                          "cmp-data": list()}
                try:
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["throughput"]["value"])
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                try:
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["throughput"]["value"])
                except KeyError:
                    pass
                except TypeError:
                    tbl_dict.pop(tst_name, None)

    def _mean_stdev(values):
        """Return [mean, stdev] in Mpps after outlier removal, or
        [None, None] when no usable samples remain.

        :param values: Collected throughput samples.
        :type values: list
        :rtype: list
        """
        if values:
            data_t = remove_outliers(values,
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                return [round(mean(data_t) / 1000000, 2),
                        round(stdev(data_t) / 1000000, 2)]
        return [None, None]

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        item.extend(_mean_stdev(tbl_dict[tst_name]["ref-data"]))
        item.extend(_mean_stdev(tbl_dict[tst_name]["cmp-data"]))
        if item[1] is not None and item[3] is not None:
            item.append(int(relative_change(float(item[1]), float(item[3]))))
        # Keep only complete rows (name, 2x mean/stdev, relative change).
        if len(item) == 6:
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate tables:
    # All tests in csv:
    tbl_names = ["{0}-{1}-{2}-full{3}".format(table["output-file"], test_type,
                                              cores,
                                              table["output-file-ext"])
                 for test_type in ("ndr", "pdr")
                 for cores in ("1t1c", "2t2c", "4t4c")]
    for file_name in tbl_names:
        logging.info("      Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            for test in tbl_lst:
                if (file_name.split("-")[-3] in test[0] and    # NDR vs PDR
                        file_name.split("-")[-2] in test[0]):  # cores
                    # Strip the trailing ndr/pdr token from the test name.
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item) for item in test])
                                       + "\n")

    # All tests in txt:
    tbl_names_txt = ["{0}-{1}-{2}-full.txt".format(table["output-file"],
                                                   test_type, cores)
                     for test_type in ("ndr", "pdr")
                     for cores in ("1t1c", "2t2c", "4t4c")]

    for i, txt_name in enumerate(tbl_names_txt):
        txt_table = None
        logging.info("      Writing file: '{0}'".format(txt_name))
        # 'rb' because the Python 2 csv module expects binary mode.
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                else:
                    txt_table.add_row(row)
            txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))

    def _write_selected(test_type):
        """Write the first and the last "nr-of-tests-shown" data rows of the
        1t1c "full" csv table to separate -top / -bottom files.

        :param test_type: "ndr" or "pdr".
        :type test_type: str
        """
        input_file = "{0}-{1}-1t1c-full{2}".format(table["output-file"],
                                                   test_type,
                                                   table["output-file-ext"])
        with open(input_file, "r") as in_file:
            lines = list()
            for line in in_file:
                lines.append(line)

        # lines[1:] skips the header; lines[-1:0:-1] is the data reversed.
        for suffix, selected in (("top", lines[1:]),
                                 ("bottom", lines[-1:0:-1])):
            output_file = "{0}-{1}-1t1c-{2}{3}".format(
                table["output-file"], test_type, suffix,
                table["output-file-ext"])
            logging.info("      Writing file: '{0}'".format(output_file))
            with open(output_file, "w") as out_file:
                out_file.write(header_str)
                for i, line in enumerate(selected):
                    if i == table["nr-of-tests-shown"]:
                        break
                    out_file.write(line)

    # Selected tests in csv:
    _write_selected("ndr")
    _write_selected("pdr")
540
541
def table_performance_comparison_mrr(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison_mrr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case",
                  "{0} Throughput [Mpps]".format(table["reference"]["title"]),
                  "{0} stdev [Mpps]".format(table["reference"]["title"]),
                  "{0} Throughput [Mpps]".format(table["compare"]["title"]),
                  "{0} stdev [Mpps]".format(table["compare"]["title"]),
                  "Change [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table.
    # NOTE: .items() instead of the Python-2-only .iteritems(); identical
    # behavior.
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name,
                                          "ref-data": list(),
                                          "cmp-data": list()}
                try:
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["result"]["throughput"])
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                try:
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["result"]["throughput"])
                except KeyError:
                    pass
                except TypeError:
                    tbl_dict.pop(tst_name, None)

    def _mean_stdev(values):
        """Return [mean, stdev] in Mpps after outlier removal, or
        [None, None] when no usable samples remain.

        :param values: Collected throughput samples.
        :type values: list
        :rtype: list
        """
        if values:
            data_t = remove_outliers(values,
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                return [round(mean(data_t) / 1000000, 2),
                        round(stdev(data_t) / 1000000, 2)]
        return [None, None]

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        item.extend(_mean_stdev(tbl_dict[tst_name]["ref-data"]))
        item.extend(_mean_stdev(tbl_dict[tst_name]["cmp-data"]))
        # item[1] != 0: presumably avoids division by zero inside
        # relative_change() -- confirm against utils.relative_change.
        if item[1] is not None and item[3] is not None and item[1] != 0:
            item.append(int(relative_change(float(item[1]), float(item[3]))))
        # Keep only complete rows (name, 2x mean/stdev, relative change).
        if len(item) == 6:
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate tables:
    # All tests in csv:
    tbl_names = ["{0}-{1}-full{2}".format(table["output-file"], cores,
                                          table["output-file-ext"])
                 for cores in ("1t1c", "2t2c", "4t4c")]
    for file_name in tbl_names:
        logging.info("      Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            for test in tbl_lst:
                if file_name.split("-")[-2] in test[0]:  # cores
                    # Strip the trailing token from the test name.
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item) for item in test])
                                       + "\n")

    # All tests in txt:
    tbl_names_txt = ["{0}-{1}-full.txt".format(table["output-file"], cores)
                     for cores in ("1t1c", "2t2c", "4t4c")]

    for i, txt_name in enumerate(tbl_names_txt):
        txt_table = None
        logging.info("      Writing file: '{0}'".format(txt_name))
        # 'rb' because the Python 2 csv module expects binary mode.
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                else:
                    txt_table.add_row(row)
            txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))
672
673
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard specified in the specification file.

    Each trended test is classified per build as "regression", "progression",
    "outlier" or "normal" using a rolling median +/- 3 * stdev band over the
    configured window. The result is written as a CSV file and as a
    pretty-printed text table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]",
              "Outliers [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Collect throughput results per test, keyed by build number:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name,
                                          "data": dict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] =  \
                        tst_data["result"]["throughput"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        # At least three samples are needed to compute a meaningful trend:
        if len(tbl_dict[tst_name]["data"]) > 2:

            pd_data = pd.Series(tbl_dict[tst_name]["data"])
            last_key = pd_data.keys()[-1]
            win_size = min(pd_data.size, table["window"])
            win_first_idx = pd_data.size - win_size
            # Key of the first sample inside the short-term window.
            # Bug fix: the original used keys()[-win_first_idx], which indexes
            # from the wrong end of the series (and returns the very first
            # sample when win_first_idx == 0).
            key_14 = pd_data.keys()[win_first_idx]
            long_win_size = min(pd_data.size, table["long-trend-window"])

            # Drop outliers before computing the trend statistics:
            data_t, _ = split_outliers(pd_data, outlier_const=1.5,
                                       window=win_size)

            median_t = data_t.rolling(window=win_size, min_periods=2).median()
            stdev_t = data_t.rolling(window=win_size, min_periods=2).std()
            median_first_idx = pd_data.size - long_win_size
            try:
                max_median = max([x for x in median_t.values[median_first_idx:]
                                  if not isnan(x)])
            except ValueError:
                # All medians in the long-term window are NaN.
                max_median = nan
            try:
                last_median_t = median_t[last_key]
            except KeyError:
                last_median_t = nan
            try:
                median_t_14 = median_t[key_14]
            except KeyError:
                median_t_14 = nan

            # Test name:
            name = tbl_dict[tst_name]["name"]

            # Classification list: one verdict per build in the series.
            classification_lst = list()
            for build_nr, value in pd_data.iteritems():

                if isnan(data_t[build_nr]) \
                        or isnan(median_t[build_nr]) \
                        or isnan(stdev_t[build_nr]) \
                        or isnan(value):
                    classification_lst.append("outlier")
                elif value < (median_t[build_nr] - 3 * stdev_t[build_nr]):
                    classification_lst.append("regression")
                elif value > (median_t[build_nr] + 3 * stdev_t[build_nr]):
                    classification_lst.append("progression")
                else:
                    classification_lst.append("normal")

            # Short-term change: last median vs. median at the window start:
            if isnan(last_median_t) or isnan(median_t_14) or median_t_14 == 0.0:
                rel_change_last = nan
            else:
                rel_change_last = round(
                    (last_median_t - median_t_14) / median_t_14, 2)

            # Long-term change: last median vs. maximum median in the
            # long-trend window:
            if isnan(max_median) or isnan(last_median_t) or max_median == 0.0:
                rel_change_long = nan
            else:
                rel_change_long = round(
                    (last_median_t - max_median) / max_median, 2)

            tbl_lst.append(
                [name,
                 '-' if isnan(last_median_t) else
                 round(last_median_t / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
                 classification_lst[win_first_idx:].count("regression"),
                 classification_lst[win_first_idx:].count("progression"),
                 classification_lst[win_first_idx:].count("outlier")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Sort the table by the number of regressions, then progressions, then
    # outliers, each in descending order:
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_pro = [item for item in tbl_reg if item[5] == nrp]
            for nro in range(table["window"], -1, -1):
                # Bug fix: the outlier count is column 6; the original
                # compared column 5 (the progression count, already filtered
                # one level up), which broke the sort by outliers.
                tbl_out = [item for item in tbl_pro if item[6] == nro]
                tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("      Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    txt_table = None
    logging.info("      Writing file: '{0}'".format(txt_file_name))
    with open(file_name, 'rb') as csv_file:
        csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
        for row in csv_content:
            if txt_table is None:
                txt_table = prettytable.PrettyTable(row)
            else:
                txt_table.add_row(row)
        # Bug fix: the column header is "Test Case"; the original aligned
        # "Test case", which matched no column and had no effect.
        txt_table.align["Test Case"] = "l"
    with open(txt_file_name, "w") as txt_file:
        txt_file.write(str(txt_table))
833
834
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the dashboard CSV produced by table_performance_trending_dashboard
    and renders it as an HTML table (wrapped in a reST "raw:: html"
    directive), turning each test name into a link to the matching trending
    graph page and anchor.

    :param table: Table to generate.
    :param input_data: Data to process. Not used by this algorithm; kept for
        a uniform algorithm signature.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows (alternating background colors):
    for r_idx, row in enumerate(csv_lst[1:]):
        background = "#D4E4F7" if r_idx % 2 else "white"
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name:
            url = "../trending/"
            file_name = ""
            anchor = "#"
            feature = ""
            if c_idx == 0:
                # Map the test name onto the trending page it belongs to:
                if "memif" in item:
                    file_name = "container_memif.html"

                elif "vhost" in item:
                    if "l2xcbase" in item or "l2bdbasemaclrn" in item:
                        file_name = "vm_vhost_l2.html"
                    elif "ip4base" in item:
                        file_name = "vm_vhost_ip4.html"

                elif "ipsec" in item:
                    file_name = "ipsec.html"

                elif "ethip4lispip" in item or "ethip4vxlan" in item:
                    file_name = "ip4_tunnels.html"

                elif "ip4base" in item or "ip4scale" in item:
                    file_name = "ip4.html"
                    if "iacl" in item or "snat" in item or "cop" in item:
                        feature = "-features"

                elif "ip6base" in item or "ip6scale" in item:
                    file_name = "ip6.html"

                elif "l2xcbase" in item or "l2xcscale" in item \
                        or "l2bdbasemaclrn" in item or "l2bdscale" in item \
                        or "l2dbbasemaclrn" in item or "l2dbscale" in item:
                    file_name = "l2.html"
                    if "iacl" in item:
                        feature = "-features"

                # NIC part of the anchor:
                if "x520" in item:
                    anchor += "x520-"
                elif "x710" in item:
                    anchor += "x710-"
                elif "xl710" in item:
                    anchor += "xl710-"

                # Frame-size part of the anchor:
                if "64b" in item:
                    anchor += "64b-"
                elif "78b" in item:
                    # Bug fix: the original appended "78b" without the
                    # trailing "-", producing malformed anchors such as
                    # "#x520-78b1t1c" instead of "#x520-78b-1t1c".
                    anchor += "78b-"
                elif "imix" in item:
                    anchor += "imix-"
                elif "9000b" in item:
                    anchor += "9000b-"
                elif "1518" in item:
                    anchor += "1518b-"

                # Thread/core part of the anchor:
                if "1t1c" in item:
                    anchor += "1t1c"
                elif "2t2c" in item:
                    anchor += "2t2c"
                elif "4t4c" in item:
                    anchor += "4t4c"

                url = url + file_name + anchor + feature

                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item

            if c_idx > 0:
                td.text = item

    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("      Writing file: '{0}'".
                         format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return

©2016 FD.io a Linux Foundation Collaborative Project. All Rights Reserved.
Linux Foundation is a registered trademark of The Linux Foundation. Linux is a registered trademark of Linus Torvalds.
Please see our privacy policy and terms of use.