CSIT-1208: Add new data to 1807 report
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20
21 from string import replace
22 from collections import OrderedDict
23 from numpy import nan, isnan
24 from xml.etree import ElementTree as ET
25
26 from errors import PresentationError
27 from utils import mean, stdev, relative_change, classify_anomalies, \
28     convert_csv_to_pretty_txt
29
30
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table item names its generator function via the "algorithm" key;
    the function is looked up in this module and called with the table
    specification and the input data.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        # Look the algorithm up by name instead of eval()-ing the
        # specification string: this cannot execute arbitrary expressions
        # from the specification file, and it does not mis-report a
        # NameError raised *inside* the algorithm as a missing algorithm.
        algorithm = globals().get(table["algorithm"], None)
        if algorithm is None:
            logging.error("Probably algorithm '{alg}' is not defined.".
                          format(alg=table["algorithm"]))
            continue
        algorithm(table, data)
    logging.info("Done.")
48
49
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    One CSV file is written per suite, containing one row per test whose
    parent suite matches.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables; quotes inside titles are escaped
    # by doubling them (CSV convention).
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Only the first build of the first job in the specification is used
    # as the data source.
    job = next(iter(table["data"]))
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    # Iterate via keys() so both dict and pandas-Series-like containers
    # behave the same way.
    for suite_key in suites.keys():
        suite_name = suites[suite_key]["name"]
        table_lst = list()
        for test in data[job][build].keys():
            test_data = data[job][build][test]
            if test_data["parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table["columns"]:
                try:
                    # The column specification is e.g. "data msg"; the
                    # word after the first space is the key into the test
                    # data.
                    col_data = str(test_data[column["data"].
                                   split(" ")[1]]).replace('"', '""')
                    if column["data"].split(" ")[1] in ("vat-history",
                                                        "show-run"):
                        # Drop the first line-break marker and the
                        # trailing "|br| ", then wrap the text in the
                        # |prein| / |preout| reST substitutions.
                        col_data = col_data.replace(" |br| ", "", 1)
                        col_data = " |prein| {0} |preout| ".\
                            format(col_data[:-5])
                    row_lst.append('"{0}"'.format(col_data))
                except KeyError:
                    row_lst.append("No data")
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
116
117
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Like table_details, but the data of all builds is first merged into
    one data set.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data: filter, then merge all builds into one data set.
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; quotes inside titles are escaped
    # by doubling them (CSV convention).
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Iterate via keys() so both dict and pandas-Series-like containers
    # behave the same way.
    for suite_key in suites.keys():
        suite_name = suites[suite_key]["name"]
        table_lst = list()
        for test in data.keys():
            test_data = data[test]
            if test_data["parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table["columns"]:
                try:
                    # The column specification is e.g. "data msg"; the
                    # word after the first space is the key into the test
                    # data.
                    col_data = str(test_data[column["data"].
                                   split(" ")[1]]).replace('"', '""')
                    if column["data"].split(" ")[1] in ("vat-history",
                                                        "show-run"):
                        # Drop the first line-break marker and the
                        # trailing "|br| ", then wrap the text in the
                        # |prein| / |preout| reST substitutions.
                        col_data = col_data.replace(" |br| ", "", 1)
                        col_data = " |prein| {0} |preout| ".\
                            format(col_data[:-5])
                    row_lst.append('"{0}"'.format(col_data))
                except KeyError:
                    row_lst.append("No data")
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
181
182
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    The table compares mean throughput (or receive rate) and its standard
    deviation between a "reference" and a "compare" set of builds, with
    optional extra "history" columns, and writes the result as .csv and
    .txt files.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        # Optional extra columns with historical results, two columns
        # (mean, stdev) per history item.
        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Normalize the test name: strip the rate-search suffix
                # and collapse thread/core tags so the same test matches
                # across test types and thread configurations.
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name: first part of the parent suite name
                    # plus the test name without its last segment.
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[:-1]))
                    if "comparison across testbeds" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["ref-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except TypeError:
                    # NOTE(review): the "compare" loop below also catches
                    # KeyError here; presumably a missing key cannot occur
                    # in reference data -- confirm.
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Same name normalization as for the reference data.
                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                    replace("-ndrdisc", "").replace("-pdr", ""). \
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["cmp-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except KeyError:
                    # Test present in "compare" but not in "reference".
                    pass
                except TypeError:
                    # Invalid data; drop the test from the table entirely.
                    tbl_dict.pop(tst_name_mod, None)
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                            replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                            replace("-ndrdisc", "").replace("-pdr", ""). \
                            replace("-ndr", "").\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                        # Historical data is only attached to tests that
                        # are already present in the reference data.
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][item["title"
                                ]].append(tst_data["result"]["receive-rate"].
                                          avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "PDR"]["LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "NDR"]["LOWER"])
                            else:
                                continue
                        except (TypeError, KeyError):
                            pass

    # Build the table rows: mean and stdev (converted to Mpps) for each
    # history item, then for the reference and compare data, plus the
    # relative change.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # item[-4] is the reference mean, item[-2] the compare mean; the
        # delta is appended only when both exist and the reference mean is
        # non-zero.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Rows with a missing delta (or missing history columns) are
        # silently dropped by this length check.
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
401
402
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table: per test, an ordered mapping
    # build -> receive rate (insertion order follows the build order).
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            tst_data["name"])
                    tbl_dict[tst_name] = {"name": name,
                                          "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        # At least two samples are needed to talk about a trend.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        # Short-term and long-term windows, clipped to the amount of
        # available data.
        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        # Best (maximum) trend average within the long-term window but
        # outside the short-term window; nan when no valid sample exists
        # there.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        # Trend average at the start of the short-term window.
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last trend value vs the value one window ago.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        # Long-term change: last trend value vs the long-term maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Tests with no computable change at all are left out.
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 '-' if isnan(last_avg) else
                 round(last_avg / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    # Pre-sort alphabetically by test name, then bucket the rows by the
    # number of regressions and, within that, by the number of
    # progressions (both descending); each bucket is sorted by the
    # short-term change.
    tbl_lst.sort(key=lambda rel: rel[0])

    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
515
516
517 def _generate_url(base, test_name):
518     """Generate URL to a trending plot from the name of the test case.
519
520     :param base: The base part of URL common to all test cases.
521     :param test_name: The name of the test case.
522     :type base: str
523     :type test_name: str
524     :returns: The URL to the plot with the trending data for the given test
525         case.
526     :rtype str
527     """
528
529     url = base
530     file_name = ""
531     anchor = "#"
532     feature = ""
533
534     if "lbdpdk" in test_name or "lbvpp" in test_name:
535         file_name = "link_bonding.html"
536
537     elif "testpmd" in test_name or "l3fwd" in test_name:
538         file_name = "dpdk.html"
539
540     elif "memif" in test_name:
541         file_name = "container_memif.html"
542
543     elif "srv6" in test_name:
544         file_name = "srv6.html"
545
546     elif "vhost" in test_name:
547         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
548             file_name = "vm_vhost_l2.html"
549         elif "ip4base" in test_name:
550             file_name = "vm_vhost_ip4.html"
551
552     elif "ipsec" in test_name:
553         file_name = "ipsec.html"
554
555     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
556         file_name = "ip4_tunnels.html"
557
558     elif "ip4base" in test_name or "ip4scale" in test_name:
559         file_name = "ip4.html"
560         if "iacl" in test_name or "snat" in test_name or "cop" in test_name:
561             feature = "-features"
562
563     elif "ip6base" in test_name or "ip6scale" in test_name:
564         file_name = "ip6.html"
565
566     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
567             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
568             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
569         file_name = "l2.html"
570         if "iacl" in test_name:
571             feature = "-features"
572
573     if "x520" in test_name:
574         anchor += "x520-"
575     elif "x710" in test_name:
576         anchor += "x710-"
577     elif "xl710" in test_name:
578         anchor += "xl710-"
579
580     if "64b" in test_name:
581         anchor += "64b-"
582     elif "78b" in test_name:
583         anchor += "78b-"
584     elif "imix" in test_name:
585         anchor += "imix-"
586     elif "9000b" in test_name:
587         anchor += "9000b-"
588     elif "1518" in test_name:
589         anchor += "1518b-"
590
591     if "1t1c" in test_name:
592         anchor += "1t1c"
593     elif "2t2c" in test_name:
594         anchor += "2t2c"
595     elif "4t4c" in test_name:
596         anchor += "4t4c"
597
598     return url + file_name + anchor + feature
599
600
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the previously generated dashboard CSV and renders it as an HTML
    table embedded in a reST "raw" directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = list(csv_content)
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    hdr_row = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        hdr_cell = ET.SubElement(
            hdr_row, "th",
            attrib=dict(align="left" if idx == 0 else "center"))
        hdr_cell.text = item

    # Rows: background color pairs alternate by row parity.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        else:
            color = "normal"
        data_row = ET.SubElement(
            dashboard, "tr",
            attrib=dict(bgcolor=colors[color][r_idx % 2]))

        # Columns:
        for c_idx, item in enumerate(row):
            cell = ET.SubElement(
                data_row, "td",
                attrib=dict(align="left" if c_idx == 0 else "center"))
            if c_idx == 0:
                # The first column is the test name, linked to its
                # trending plot.
                link = ET.SubElement(
                    cell, "a",
                    attrib=dict(href=_generate_url("../trending/", item)))
                link.text = item
            else:
                cell.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
671
672
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Collect per-test history: build -> (status, date, vpp build,
    # csit build), keeping the build order.
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(
                            tst_data["parent"].split("-")[0],
                            tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][build] = (
                        tst_data["status"],
                        input_data.metadata(job, build).get("generated", ""),
                        input_data.metadata(job, build).get("version", ""),
                        build)
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    # Count the failures within the last "window" builds and remember the
    # details of the most recent one.
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        win_size = min(len(tst_data["data"]), table["window"])
        fails_nr = 0
        for val in list(tst_data["data"].values())[-win_size:]:
            if val[0] == "FAIL":
                fails_nr += 1
                last_date, last_vpp, last_csit = val[1], val[2], val[3]
        if fails_nr:
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            last_date,
                            last_vpp,
                            "mrr-daily-build-{0}".format(last_csit)])

    # Sort by the last-failure date (descending), then bucket by the
    # failure count (descending).
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(table["window"], -1, -1):
        tbl_sorted.extend([item for item in tbl_lst if item[1] == nrf])
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
754
755
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV file produced by table_failed_tests and renders it as an
    HTML table (wrapped in an rST ".. raw:: html" directive). The first CSV
    column is turned into a link to the trending page.

    :param table: Table to generate.
    :param input_data: Data to process. Not used by this algorithm (the data
        comes from the CSV input file); kept for the uniform algorithm
        interface.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    if not csv_lst:
        # An empty input file would crash on csv_lst[0] below; warn and skip
        # the table instead.
        logging.warning("No data in the file '{0}'.".
                        format(table["input-file"]))
        return

    # Table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header (the first CSV row):
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows:
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        # Alternate the row background colors for readability.
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name (first column) links to the trending page:
            if c_idx == 0:
                url = _generate_url("../trending/", item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return