CSIT-1208: Add new data to 1807 report
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20
21 from string import replace
22 from collections import OrderedDict
23 from numpy import nan, isnan
24 from xml.etree import ElementTree as ET
25
26 from errors import PresentationError
27 from utils import mean, stdev, relative_change, classify_anomalies, \
28     convert_csv_to_pretty_txt
29
30
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table specification names the generator algorithm to use
    (table["algorithm"]); the algorithm is looked up among the module-level
    functions of this module and called with the table specification and
    the input data.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        # Resolve the generator function by name instead of eval() so that
        # a spec-supplied string cannot execute arbitrary expressions, and
        # so that NameErrors raised *inside* a generator are not silently
        # mistaken for a missing algorithm.
        generator = globals().get(table["algorithm"])
        if generator is None:
            logging.error("Probably algorithm '{alg}' is not defined.".
                          format(alg=table["algorithm"]))
            continue
        generator(table, data)
    logging.info("Done.")
48
49
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    One CSV file is written per test suite; suites with no matching tests
    produce no file. Items missing in the test data are written as
    "No data".

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    # Each column title is CSV-quoted; embedded double quotes are escaped
    # by doubling them.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification
    # NOTE: Only the first job and its first build listed in table["data"]
    # are used as the data source for this table.
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.iteritems():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            # Substring match: the test belongs to this suite if its
            # "parent" string occurs within the suite name.
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # column["data"] looks like "data <key>"; the token
                        # after the first space selects the test-data item.
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # Drop the first " |br| " separator and wrap the
                            # text in |prein| / |preout| rST substitutions;
                            # the last five characters are cut (presumably a
                            # trailing " |br|" — TODO confirm).
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
116
117
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Like table_details, but the data of all jobs/builds is first merged
    into a single data set, so each test appears once. One CSV file is
    written per test suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    # NOTE: This log message duplicates the one above although it is the
    # suites data set being created here.
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    # Each column title is CSV-quoted; embedded double quotes are escaped
    # by doubling them.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.iteritems():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            # Substring match: the test belongs to this suite if its
            # "parent" string occurs within the suite name.
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # column["data"] looks like "data <key>"; the token
                        # after the first space selects the test-data item.
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # Drop the first " |br| " separator and wrap the
                            # text in |prein| / |preout| rST substitutions;
                            # the last five characters are cut (presumably a
                            # trailing " |br|" — TODO confirm).
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
181
182
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Builds a CSV (and pretty .txt) table comparing a "reference" and a
    "compare" data set per test, with optional extra "history" columns.
    Values are reported as mean and standard deviation in Mpps plus the
    relative change [%] between reference and compare.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    # tbl_dict maps a normalized test name (NDR/PDR suffixes stripped,
    # thread/core tags collapsed to core counts) to its display name and
    # collected reference/compare samples.
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Display name: first token of the parent plus the test
                    # name without its last "-" separated token.
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[:-1]))
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["ref-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Same name normalization as for the reference data set.
                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                    replace("-ndrdisc", "").replace("-pdr", ""). \
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["cmp-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except KeyError:
                    # Test present in compare set but not in reference set.
                    pass
                except TypeError:
                    # Unusable compare data: drop the whole entry so the row
                    # is not emitted with reference data only.
                    tbl_dict.pop(tst_name_mod, None)
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        # Same name normalization as above.
                        tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                            replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                            replace("-ndrdisc", "").replace("-pdr", ""). \
                            replace("-ndr", "").\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                        # Only collect history for tests already present in
                        # the reference/compare sets.
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][item["title"
                                ]].append(tst_data["result"]["receive-rate"].
                                          avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "PDR"]["LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "NDR"]["LOWER"])
                            else:
                                continue
                        except (TypeError, KeyError):
                            pass

    # Flatten tbl_dict into table rows: [name, (hist mean, hist stdev)...,
    # ref mean, ref stdev, cmp mean, cmp stdev, delta]. Rates are converted
    # to Mpps (divide by 1e6) and rounded to 2 decimals.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # Delta [%] from reference mean (item[-4]) to compare mean
        # (item[-2]); skipped when either is missing or reference is 0.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Rows without a computed delta have fewer cells than the header
        # and are dropped here.
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
396
397
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    For each test the receive-rate samples are classified into anomalies
    (regression / progression / normal) and short-term vs long-term trend
    changes are computed; the result is written as CSV and pretty .txt.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    # tbl_dict maps test name -> display name + per-build receive-rate
    # samples (ordered by build).
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            tst_data["name"])
                    tbl_dict[tst_name] = {"name": name,
                                          "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        # A trend cannot be computed from fewer than two samples.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        # Clamp windows to the number of available samples.
        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        try:
            # Best average in the long-term window excluding the short-term
            # window; ValueError means the slice is empty or all-NaN.
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last average vs average one window ago.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        # Long-term change: last average vs the long-term maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 '-' if isnan(last_avg) else
                 round(last_avg / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order the rows by regression count (desc), then progression count
    # (desc); the alphabetical pre-sort above keeps ties stable by name.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
510
511
512 def _generate_url(base, test_name):
513     """Generate URL to a trending plot from the name of the test case.
514
515     :param base: The base part of URL common to all test cases.
516     :param test_name: The name of the test case.
517     :type base: str
518     :type test_name: str
519     :returns: The URL to the plot with the trending data for the given test
520         case.
521     :rtype str
522     """
523
524     url = base
525     file_name = ""
526     anchor = "#"
527     feature = ""
528
529     if "lbdpdk" in test_name or "lbvpp" in test_name:
530         file_name = "link_bonding.html"
531
532     elif "testpmd" in test_name or "l3fwd" in test_name:
533         file_name = "dpdk.html"
534
535     elif "memif" in test_name:
536         file_name = "container_memif.html"
537
538     elif "srv6" in test_name:
539         file_name = "srv6.html"
540
541     elif "vhost" in test_name:
542         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
543             file_name = "vm_vhost_l2.html"
544         elif "ip4base" in test_name:
545             file_name = "vm_vhost_ip4.html"
546
547     elif "ipsec" in test_name:
548         file_name = "ipsec.html"
549
550     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
551         file_name = "ip4_tunnels.html"
552
553     elif "ip4base" in test_name or "ip4scale" in test_name:
554         file_name = "ip4.html"
555         if "iacl" in test_name or "snat" in test_name or "cop" in test_name:
556             feature = "-features"
557
558     elif "ip6base" in test_name or "ip6scale" in test_name:
559         file_name = "ip6.html"
560
561     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
562             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
563             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
564         file_name = "l2.html"
565         if "iacl" in test_name:
566             feature = "-features"
567
568     if "x520" in test_name:
569         anchor += "x520-"
570     elif "x710" in test_name:
571         anchor += "x710-"
572     elif "xl710" in test_name:
573         anchor += "xl710-"
574
575     if "64b" in test_name:
576         anchor += "64b-"
577     elif "78b" in test_name:
578         anchor += "78b-"
579     elif "imix" in test_name:
580         anchor += "imix-"
581     elif "9000b" in test_name:
582         anchor += "9000b-"
583     elif "1518" in test_name:
584         anchor += "1518b-"
585
586     if "1t1c" in test_name:
587         anchor += "1t1c"
588     elif "2t2c" in test_name:
589         anchor += "2t2c"
590     elif "4t4c" in test_name:
591         anchor += "4t4c"
592
593     return url + file_name + anchor + feature
594
595
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the CSV dashboard produced by
    table_performance_trending_dashboard and renders it as an HTML table
    (embedded in an rST "raw:: html" directive) with colored rows and
    links from test names to their trending plots.

    :param table: Table to generate.
    :param input_data: Data to process.  Note: not used by this algorithm;
        all input comes from table["input-file"].
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        # NOTE: binary mode for csv.reader is a Python 2 idiom.
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows:
    # Two shades per category so adjacent rows alternate in intensity.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 are the regression / progression counts.
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        else:
            color = "normal"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name:
            if c_idx == 0:
                # First column is the test name: link it to its trending
                # plot.
                url = _generate_url("../trending/", item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
666
667
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Counts, per test, the number of failures within the last
    table["window"] builds and records when and on which VPP / CSIT build
    the test failed last. The result is written as CSV and pretty .txt,
    ordered by failure count (descending).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification
    # tbl_dict maps test name -> display name + per-build tuples of
    # (status, generated-timestamp, VPP version, CSIT build), ordered by
    # build.
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            tst_data["name"])
                    tbl_dict[tst_name] = {"name": name,
                                          "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][build] = (
                        tst_data["status"],
                        input_data.metadata(job, build).get("generated", ""),
                        input_data.metadata(job, build).get("version", ""),
                        build)
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_data in tbl_dict.values():
        # Look only at the last win_size builds; keep details of the most
        # recent failure seen while counting.
        win_size = min(len(tst_data["data"]), table["window"])
        fails_nr = 0
        for val in tst_data["data"].values()[-win_size:]:
            if val[0] == "FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        # fails_last_* are guaranteed to be bound here because fails_nr > 0
        # implies at least one FAIL iteration.
        if fails_nr:
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    # Pre-sort by last-failure time (desc), then bucket by failure count
    # (desc); the stable pre-sort keeps ties ordered by recency.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(table["window"], -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
749
750
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV file produced by the failed-tests table generation
    (referenced via table["input-file"]) and renders it as an HTML table
    embedded in a reStructuredText ``.. raw:: html`` directive, written to
    table["output-file"]. The first column of each data row becomes a link
    into the trending pages. Missing input/output file keys and malformed
    or empty CSV input are logged as warnings, not raised.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the whole CSV into memory; it is small (one row per failed test).
    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = list(csv_content)
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # An empty file has no header row; csv_lst[0] below would raise
    # IndexError and abort the whole report generation. Warn and bail out
    # instead, consistent with the other error paths above.
    if not csv_lst:
        logging.warning("No data in the input file '{0}'.".
                        format(table["input-file"]))
        return

    # Table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header - the first CSV row:
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Data rows, with alternating background colors for readability:
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # First column is the test name: render it as a link to the
            # corresponding trending page.
            if c_idx == 0:
                url = _generate_url("../trending/", item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return