CSIT-1204: Make new TC names backward compatible (trending)
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20
21 from string import replace
22 from collections import OrderedDict
23 from numpy import nan, isnan
24 from xml.etree import ElementTree as ET
25
26 from errors import PresentationError
27 from utils import mean, stdev, relative_change, classify_anomalies, \
28     convert_csv_to_pretty_txt
29
30
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table entry names its generator algorithm; the algorithm is looked
    up by name and invoked with the table specification and the input data.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for tbl in spec.tables:
        try:
            # Resolve the generator function by its name from the spec.
            # eval() is acceptable only because the specification file is
            # a trusted, locally maintained input.
            table_generator = eval(tbl["algorithm"])
            table_generator(tbl, data)
        except NameError as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=tbl["algorithm"], err=repr(err)))
    logging.info("Done.")
48
49
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables; quotes inside titles are escaped
    # by doubling them (csv convention).
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification. Only the first job and its first build are used.
    # list(...)[0] instead of the py2-only .keys()[0] so the code also runs
    # under Python 3.
    job = list(table["data"])[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    # .items() instead of the py2-only .iteritems(); the iteration is
    # equivalent on both Python versions.
    for suite_longname, suite in suites.items():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # The column spec is e.g. "data name"; the second
                        # token selects the key in the test data.
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # str.replace with count=1 replaces the
                            # deprecated string.replace(..., maxreplace=1).
                            col_data = col_data.replace(" |br| ", "", 1)
                            # Presumably strips a trailing 5-char "br| "
                            # fragment - TODO confirm against data format.
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
116
117
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; quotes inside titles are escaped
    # by doubling them (csv convention).
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # .items() instead of the py2-only .iteritems(); the iteration is
    # equivalent on both Python versions.
    for _, suite in suites.items():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # The column spec is e.g. "data name"; the second
                        # token selects the key in the test data.
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # str.replace with count=1 replaces the
                            # deprecated string.replace(..., maxreplace=1).
                            col_data = col_data.replace(" |br| ", "", 1)
                            # Presumably strips a trailing 5-char "br| "
                            # fragment - TODO confirm against data format.
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
181
182
def table_performance_improvements(table, input_data):
    """Generate the table(s) with algorithm: table_performance_improvements
    specified in the specification file.

    # FIXME: Not used now.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    def _write_line_to_file(file_handler, data):
        """Write a line to the .csv file.

        :param file_handler: File handler for the csv file. It must be open for
         writing text.
        :param data: Item to be written to the file.
        :type file_handler: BinaryIO
        :type data: list
        """

        line_lst = list()
        for item in data:
            if isinstance(item["data"], str):
                # Remove -?drdisc from the end
                # ("-ndrdisc" / "-pdrdisc" are 8 characters, hence [:-8]).
                if item["data"].endswith("drdisc"):
                    item["data"] = item["data"][:-8]
                line_lst.append(item["data"])
            elif isinstance(item["data"], float):
                line_lst.append("{:.1f}".format(item["data"]))
            elif item["data"] is None:
                line_lst.append("")
        file_handler.write(",".join(line_lst) + "\n")

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the template
    file_name = table.get("template", None)
    if file_name:
        try:
            tmpl = _read_csv_template(file_name)
        except PresentationError:
            logging.error("  The template '{0}' does not exist. Skipping the "
                          "table.".format(file_name))
            return None
    else:
        logging.error("The template is not defined. Skipping the table.")
        return None

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append(column["title"])

    # Generate the data for the table according to the model in the table
    # specification. Each column's "data" field is a mini command:
    #   "template <idx>"         - copy item <idx> from the template line,
    #   "data <job>... <op>"     - aggregate throughput over the given jobs
    #                              with the named operation,
    #   "operation <op> <i> <j>" - apply the named operation to two
    #                              previously computed columns.
    tbl_lst = list()
    for tmpl_item in tmpl:
        tbl_item = list()
        for column in table["columns"]:
            cmd = column["data"].split(" ")[0]
            args = column["data"].split(" ")[1:]
            if cmd == "template":
                try:
                    val = float(tmpl_item[int(args[0])])
                except ValueError:
                    # Not a number - keep the string (e.g. the test name).
                    val = tmpl_item[int(args[0])]
                tbl_item.append({"data": val})
            elif cmd == "data":
                jobs = args[0:-1]
                operation = args[-1]
                data_lst = list()
                for job in jobs:
                    for build in data[job]:
                        try:
                            # tmpl_item[0] is used as the test key in the
                            # build data.
                            data_lst.append(float(build[tmpl_item[0]]
                                                  ["throughput"]["value"]))
                        except (KeyError, TypeError):
                            # No data, ignore
                            continue
                if data_lst:
                    # NOTE: eval() resolves the operation name from the
                    # trusted specification file; the division by 1e6
                    # presumably converts pps to Mpps - verify against spec.
                    tbl_item.append({"data": (eval(operation)(data_lst)) /
                                             1000000})
                else:
                    tbl_item.append({"data": None})
            elif cmd == "operation":
                operation = args[0]
                try:
                    nr1 = float(tbl_item[int(args[1])]["data"])
                    nr2 = float(tbl_item[int(args[2])]["data"])
                    if nr1 and nr2:
                        tbl_item.append({"data": eval(operation)(nr1, nr2)})
                    else:
                        tbl_item.append({"data": None})
                except (IndexError, ValueError, TypeError):
                    logging.error("No data for {0}".format(tbl_item[0]["data"]))
                    tbl_item.append({"data": None})
                    continue
            else:
                logging.error("Not supported command {0}. Skipping the table.".
                              format(cmd))
                return None
        tbl_lst.append(tbl_item)

    # Sort the table according to the relative change
    # (the last column of each row), descending.
    tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)

    # Create the tables and write them to the files. Rows with a relative
    # change >= 10.0 go to the "top" files, the rest to the "low" files;
    # NDR and PDR tests are written to separate files.
    file_names = [
        "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])
    ]

    for file_name in file_names:
        logging.info("    Writing the file '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in tbl_lst:
                if isinstance(item[-1]["data"], float):
                    rel_change = round(item[-1]["data"], 1)
                else:
                    rel_change = item[-1]["data"]
                if "ndr_top" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_top" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "ndr_low" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_low" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)

    logging.info("  Done.")
332
333
334 def _read_csv_template(file_name):
335     """Read the template from a .csv file.
336
337     :param file_name: Name / full path / relative path of the file to read.
338     :type file_name: str
339     :returns: Data from the template as list (lines) of lists (items on line).
340     :rtype: list
341     :raises: PresentationError if it is not possible to read the file.
342     """
343
344     try:
345         with open(file_name, 'r') as csv_file:
346             tmpl_data = list()
347             for line in csv_file:
348                 tmpl_data.append(line[:-1].split(","))
349         return tmpl_data
350     except IOError as err:
351         raise PresentationError(str(err), level="ERROR")
352
353
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables: optional "history" columns first,
    # then reference, compare, and the relative change.
    try:
        header = ["Test case", ]

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} Throughput [Mpps]".format(item["title"]),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} Throughput [Mpps]".format(table["compare"]["title"]),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Change [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    # Shortened display name: first segment of the parent
                    # suite name plus the test name without its first
                    # segment.
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name,
                                          "ref-data": list(),
                                          "cmp-data": list()}
                try:
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["throughput"]["value"])
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                try:
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["throughput"]["value"])
                except KeyError:
                    # Test not present in the reference data - skip it.
                    pass
                except TypeError:
                    # "throughput" not subscriptable (no data) - drop the
                    # whole test from the table.
                    tbl_dict.pop(tst_name, None)
    # Collect per-build throughput for each configured history column, but
    # only for tests already present in the reference data.
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        if tbl_dict.get(tst_name, None) is None:
                            continue
                        if tbl_dict[tst_name].get("history", None) is None:
                            tbl_dict[tst_name]["history"] = OrderedDict()
                        if tbl_dict[tst_name]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name]["history"][item["title"]] = \
                                list()
                        try:
                            tbl_dict[tst_name]["history"][item["title"]].\
                                append(tst_data["throughput"]["value"])
                        except (TypeError, KeyError):
                            pass

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        # Mean and stdev scaled by 1e6 to match the [Mpps]
                        # units advertised in the header.
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # item[-4] is the reference mean, item[-2] the compare mean.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only complete rows, i.e. rows where the change was computed.
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate tables:
    # All tests in csv:
    tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
                 ]
    for file_name in tbl_names:
        logging.info("      Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            for test in tbl_lst:
                if (file_name.split("-")[-3] in test[0] and    # NDR vs PDR
                        file_name.split("-")[-2] in test[0]):  # cores
                    # Drop the last name segment that was just matched.
                    # NOTE(review): this mutates the shared row in tbl_lst -
                    # relies on each row matching a single file; verify.
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item) for item in test]) +
                                       "\n")

    # All tests in txt:
    tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
                     "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-pdr-4t4c-full.txt".format(table["output-file"])
                     ]

    for i, txt_name in enumerate(tbl_names_txt):
        logging.info("      Writing file: '{0}'".format(txt_name))
        convert_csv_to_pretty_txt(tbl_names[i], txt_name)

    # Selected tests in csv. The "full" files are sorted by relative change
    # (descending), so the first data lines go to the "top" files and the
    # last data lines (walked backwards) to the "bottom" files.
    input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
        lines = list()
        for line in in_file:
            lines.append(line)

    output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        # lines[0] is the csv header - skip it and copy the first
        # "nr-of-tests-shown" data lines.
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        # Walk the data lines backwards, excluding the header at index 0.
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
        lines = list()
        for line in in_file:
            lines.append(line)

    output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        # lines[0] is the csv header - skip it and copy the first
        # "nr-of-tests-shown" data lines.
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        # Walk the data lines backwards, excluding the header at index 0.
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)
567
568
def table_performance_comparison_mrr(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison_mrr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case",
                  "{0} Throughput [Mpps]".format(table["reference"]["title"]),
                  "{0} stdev [Mpps]".format(table["reference"]["title"]),
                  "{0} Throughput [Mpps]".format(table["compare"]["title"]),
                  "{0} stdev [Mpps]".format(table["compare"]["title"]),
                  "Change [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            # .items() instead of the py2-only .iteritems(); the iteration
            # is equivalent on both Python versions.
            for tst_name, tst_data in data[job][str(build)].items():
                if tbl_dict.get(tst_name, None) is None:
                    # Shortened display name: first segment of the parent
                    # suite name plus the test name without its first
                    # segment.
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name,
                                          "ref-data": list(),
                                          "cmp-data": list()}
                try:
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["result"]["receive-rate"].avg)
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                try:
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["result"]["receive-rate"].avg)
                except KeyError:
                    # Test not present in the reference data - skip it.
                    pass
                except TypeError:
                    # No valid receive rate - drop the whole test.
                    tbl_dict.pop(tst_name, None)

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            # Mean and stdev scaled by 1e6 to match the [Mpps] units
            # advertised in the header.
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # item[1] is the reference mean, item[3] the compare mean.
        if item[1] is not None and item[3] is not None and item[1] != 0:
            item.append(int(relative_change(float(item[1]), float(item[3]))))
        # Keep only complete rows, i.e. rows where the change was computed.
        if len(item) == 6:
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate tables:
    # All tests in csv:
    tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-2t2c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-4t4c-full{1}".format(table["output-file"],
                                           table["output-file-ext"])
                 ]
    for file_name in tbl_names:
        logging.info("      Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            for test in tbl_lst:
                if file_name.split("-")[-2] in test[0]:  # cores
                    # Drop the last name segment that was just matched
                    # against the file name.
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item) for item in test]) +
                                       "\n")

    # All tests in txt:
    tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
                     "{0}-2t2c-full.txt".format(table["output-file"]),
                     "{0}-4t4c-full.txt".format(table["output-file"])
                     ]

    for i, txt_name in enumerate(tbl_names_txt):
        logging.info("      Writing file: '{0}'".format(txt_name))
        convert_csv_to_pretty_txt(tbl_names[i], txt_name)
681
682
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    The table is written twice: as CSV (table["output-file"] +
    table["output-file-ext"]) and as pretty-printed text
    (table["output-file"] + ".txt").

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    # tbl_dict maps test name -> {"name": <short display name>,
    #                             "data": OrderedDict(build -> receive rate)}
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Display name: first token of the parent suite name
                    # joined with the test name itself.
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            tst_data["name"])
                    tbl_dict[tst_name] = {"name": name,
                                          "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] =  \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        if len(data_t) < 2:
            # Not enough samples to evaluate a trend.
            continue

        # classification_lst: per-sample anomaly labels ("regression",
        # "progression", ...); avgs: per-sample trend averages.
        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        # Maximum trend average over the long window, excluding the short
        # window; nan when the slice is empty or contains only nan values.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last average vs. the average win_size samples
        # back (nan when either side is unusable).
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        # Long-term change: last average vs. the long-window maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                # Nothing meaningful to report for this test.
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 '-' if isnan(last_avg) else
                 round(last_avg / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    # Pre-sort alphabetically so ties keep a stable, readable order.
    tbl_lst.sort(key=lambda rel: rel[0])

    # Bucket sort: most regressions first, within that most progressions
    # first, within that ascending by short-term change.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
795
796
797 def _generate_url(base, test_name):
798     """Generate URL to a trending plot from the name of the test case.
799
800     :param base: The base part of URL common to all test cases.
801     :param test_name: The name of the test case.
802     :type base: str
803     :type test_name: str
804     :returns: The URL to the plot with the trending data for the given test
805         case.
806     :rtype str
807     """
808
809     url = base
810     file_name = ""
811     anchor = "#"
812     feature = ""
813
814     if "lbdpdk" in test_name or "lbvpp" in test_name:
815         file_name = "link_bonding.html"
816
817     elif "testpmd" in test_name or "l3fwd" in test_name:
818         file_name = "dpdk.html"
819
820     elif "memif" in test_name:
821         file_name = "container_memif.html"
822
823     elif "srv6" in test_name:
824         file_name = "srv6.html"
825
826     elif "vhost" in test_name:
827         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
828             file_name = "vm_vhost_l2.html"
829         elif "ip4base" in test_name:
830             file_name = "vm_vhost_ip4.html"
831
832     elif "ipsec" in test_name:
833         file_name = "ipsec.html"
834
835     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
836         file_name = "ip4_tunnels.html"
837
838     elif "ip4base" in test_name or "ip4scale" in test_name:
839         file_name = "ip4.html"
840         if "iacl" in test_name or "snat" in test_name or "cop" in test_name:
841             feature = "-features"
842
843     elif "ip6base" in test_name or "ip6scale" in test_name:
844         file_name = "ip6.html"
845
846     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
847             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
848             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
849         file_name = "l2.html"
850         if "iacl" in test_name:
851             feature = "-features"
852
853     if "x520" in test_name:
854         anchor += "x520-"
855     elif "x710" in test_name:
856         anchor += "x710-"
857     elif "xl710" in test_name:
858         anchor += "xl710-"
859
860     if "64b" in test_name:
861         anchor += "64b-"
862     elif "78b" in test_name:
863         anchor += "78b-"
864     elif "imix" in test_name:
865         anchor += "imix-"
866     elif "9000b" in test_name:
867         anchor += "9000b-"
868     elif "1518" in test_name:
869         anchor += "1518b-"
870
871     if "1t1c" in test_name:
872         anchor += "1t1c"
873     elif "2t2c" in test_name:
874         anchor += "2t2c"
875     elif "4t4c" in test_name:
876         anchor += "4t4c"
877
878     return url + file_name + anchor + feature
879
880
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the source CSV produced by table_performance_trending_dashboard.
    try:
        with open(table["input-file"], 'rb') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Root element of the generated HTML table.
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row.
    heading = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for col, label in enumerate(rows[0]):
        cell = ET.SubElement(
            heading, "th", attrib=dict(align="left" if col == 0 else "center"))
        cell.text = label

    # Data rows: the color pair is chosen by the row state (any regressions
    # -> red, else any progressions -> green, else neutral) and alternates
    # between the light/dark shade of that pair.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for row_nr, row in enumerate(rows[1:]):
        if int(row[4]):
            state = "regression"
        elif int(row[5]):
            state = "progression"
        else:
            state = "normal"
        tr = ET.SubElement(
            dashboard, "tr", attrib=dict(bgcolor=colors[state][row_nr % 2]))

        for col, value in enumerate(row):
            td = ET.SubElement(
                tr, "td", attrib=dict(align="left" if col == 0 else "center"))
            if col == 0:
                # First column: link the test name to its trending plot.
                ref = ET.SubElement(
                    td, "a",
                    attrib=dict(href=_generate_url("../trending/", value)))
                ref.text = value
            else:
                td.text = value

    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
951
952
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    The table is written as CSV (table["output-file"] +
    table["output-file-ext"]) and as pretty-printed text
    (table["output-file"] + ".txt").

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification
    # tbl_dict maps test name -> {"name": <short display name>,
    #                             "data": OrderedDict(
    #                                 build -> (status, date, vpp, csit))}
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Display name: first token of the parent suite name
                    # joined with the test name itself.
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            tst_data["name"])
                    tbl_dict[tst_name] = {"name": name,
                                          "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][build] = (
                        tst_data["status"],
                        input_data.metadata(job, build).get("generated", ""),
                        input_data.metadata(job, build).get("version", ""),
                        build)
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    # Count failures within the last win_size builds and remember the
    # metadata of the most recent failure.
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        win_size = min(len(tst_data["data"]), table["window"])
        fails_nr = 0
        # Python 2: dict.values() returns a list, so slicing works here.
        for val in tst_data["data"].values()[-win_size:]:
            if val[0] == "FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    # Sort by date of the last failure (newest first), then bucket by the
    # number of failures (most failures first).
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(table["window"], -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1034
1035
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the source CSV produced by table_failed_tests.
    try:
        with open(table["input-file"], 'rb') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Root element of the generated HTML table.
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row.
    heading = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for col, label in enumerate(rows[0]):
        cell = ET.SubElement(
            heading, "th", attrib=dict(align="left" if col == 0 else "center"))
        cell.text = label

    # Data rows with alternating background shades.
    colors = ("#e9f1fb", "#d4e4f7")
    for row_nr, row in enumerate(rows[1:]):
        tr = ET.SubElement(
            failed_tests, "tr", attrib=dict(bgcolor=colors[row_nr % 2]))

        for col, value in enumerate(row):
            td = ET.SubElement(
                tr, "td", attrib=dict(align="left" if col == 0 else "center"))
            if col == 0:
                # First column: link the test name to its trending plot.
                ref = ET.SubElement(
                    td, "a",
                    attrib=dict(href=_generate_url("../trending/", value)))
                ref.text = value
            else:
                td.text = value

    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return