PAL: Remove unused code
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20
21 from string import replace
22 from collections import OrderedDict
23 from numpy import nan, isnan
24 from xml.etree import ElementTree as ET
25
26 from errors import PresentationError
27 from utils import mean, stdev, relative_change, classify_anomalies, \
28     convert_csv_to_pretty_txt
29
30
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table item names its generator function via the "algorithm" key;
    the function is looked up in this module and called with (table, data).

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        # Resolve the algorithm in this module's globals instead of eval()
        # so an arbitrary expression in the specification cannot be executed.
        generator = globals().get(table["algorithm"])
        if not callable(generator):
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"],
                                 err="not a table-generating function"))
            continue
        try:
            generator(table, data)
        except NameError as err:
            # Keep processing the remaining tables if this one references
            # an undefined name.
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
    logging.info("Done.")
48
49
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    Writes one CSV file per suite that has matching tests.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables; double any quotes so the titles are
    # valid CSV fields.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification.  Only the first build of the first job is used.
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.iteritems():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # column["data"] is e.g. "data vat-history"; the
                        # second word is the key into the test data.
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # str.replace instead of the deprecated
                            # string.replace(); drop only the first " |br| ".
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
116
117
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Like table_details, but all builds of all jobs are merged into a single
    data set before the per-suite CSV files are written.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data: merge all builds of all jobs into one data set.
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; double any quotes so the titles are
    # valid CSV fields.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.iteritems():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # column["data"] is e.g. "data show-run"; the second
                        # word is the key into the test data.
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # str.replace instead of the deprecated
                            # string.replace(); drop only the first " |br| ".
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
181
182
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Builds one row per test with mean/stdev of the "reference" and "compare"
    data sets (optionally preceded by "history" columns) and the relative
    change between them, then writes CSV and pretty-text outputs.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        # Optional "history" column pairs precede the reference/compare
        # columns.
        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    # One row per test; the "-ndr*/-pdr*" suffixes are stripped so the same
    # test executed as different test types maps to a single row.
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[:-1]))
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["ref-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except TypeError:
                    pass  # No data in output.xml for this test

    # The compare pass only fills rows created by the reference pass above.
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                    replace("-ndrdisc", "").replace("-pdr", ""). \
                    replace("-ndr", "")
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["cmp-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except KeyError:
                    # Not present in the reference set (or a data key is
                    # missing); skip this sample.
                    pass
                except TypeError:
                    # Unusable compare data; drop the whole row.
                    tbl_dict.pop(tst_name_mod, None)
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                            replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                            replace("-ndrdisc", "").replace("-pdr", ""). \
                            replace("-ndr", "")
                        # Only augment rows already created above.
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][item["title"
                                ]].append(tst_data["result"]["receive-rate"].
                                          avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "PDR"]["LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "NDR"]["LOWER"])
                            else:
                                continue
                        except (TypeError, KeyError):
                            pass

    # Assemble the rows: values are converted from pps to Mpps.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # item[-4] / item[-2] are the reference / compare means regardless
        # of how many history columns precede them.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Rows without a delta (or with a history-column mismatch) do not
        # reach the header length and are dropped here.
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
387
388
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    Produces, per test, the latest trend value, its short- and long-term
    relative changes, and counts of regressions/progressions within the
    short window, written as CSV and pretty text.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    # Collect per-test receive rates across all builds, keyed by build id.
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            tst_data["name"])
                    tbl_dict[tst_name] = {"name": name,
                                          "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        # At least two samples are needed to evaluate a trend.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        # Window sizes are capped by the number of available samples.
        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # The long-term slice holds no valid (non-NaN) samples.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last trend value vs the value one window ago.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        # Long-term change: last trend value vs the long-window maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip tests with no measurable change in either window.
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 '-' if isnan(last_avg) else
                 round(last_avg / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    # Alphabetical pre-sort keeps ties stable in the grouping below.
    tbl_lst.sort(key=lambda rel: rel[0])

    # Group rows by regressions (desc), then progressions (desc), then sort
    # each group by the short-term change.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
501
502
503 def _generate_url(base, test_name):
504     """Generate URL to a trending plot from the name of the test case.
505
506     :param base: The base part of URL common to all test cases.
507     :param test_name: The name of the test case.
508     :type base: str
509     :type test_name: str
510     :returns: The URL to the plot with the trending data for the given test
511         case.
512     :rtype str
513     """
514
515     url = base
516     file_name = ""
517     anchor = "#"
518     feature = ""
519
520     if "lbdpdk" in test_name or "lbvpp" in test_name:
521         file_name = "link_bonding.html"
522
523     elif "testpmd" in test_name or "l3fwd" in test_name:
524         file_name = "dpdk.html"
525
526     elif "memif" in test_name:
527         file_name = "container_memif.html"
528
529     elif "srv6" in test_name:
530         file_name = "srv6.html"
531
532     elif "vhost" in test_name:
533         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
534             file_name = "vm_vhost_l2.html"
535         elif "ip4base" in test_name:
536             file_name = "vm_vhost_ip4.html"
537
538     elif "ipsec" in test_name:
539         file_name = "ipsec.html"
540
541     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
542         file_name = "ip4_tunnels.html"
543
544     elif "ip4base" in test_name or "ip4scale" in test_name:
545         file_name = "ip4.html"
546         if "iacl" in test_name or "snat" in test_name or "cop" in test_name:
547             feature = "-features"
548
549     elif "ip6base" in test_name or "ip6scale" in test_name:
550         file_name = "ip6.html"
551
552     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
553             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
554             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
555         file_name = "l2.html"
556         if "iacl" in test_name:
557             feature = "-features"
558
559     if "x520" in test_name:
560         anchor += "x520-"
561     elif "x710" in test_name:
562         anchor += "x710-"
563     elif "xl710" in test_name:
564         anchor += "xl710-"
565
566     if "64b" in test_name:
567         anchor += "64b-"
568     elif "78b" in test_name:
569         anchor += "78b-"
570     elif "imix" in test_name:
571         anchor += "imix-"
572     elif "9000b" in test_name:
573         anchor += "9000b-"
574     elif "1518" in test_name:
575         anchor += "1518b-"
576
577     if "1t1c" in test_name:
578         anchor += "1t1c"
579     elif "2t2c" in test_name:
580         anchor += "2t2c"
581     elif "4t4c" in test_name:
582         anchor += "4t4c"
583
584     return url + file_name + anchor + feature
585
586
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the csv produced by table_performance_trending_dashboard.
    try:
        with open(table["input-file"], 'rb') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row on a blue background; the first column is left-aligned.
    header_row = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for col, title in enumerate(rows[0]):
        cell = ET.SubElement(
            header_row, "th",
            attrib=dict(align="left" if col == 0 else "center"))
        cell.text = title

    # Data rows: red shades for regressions, green for progressions, blue
    # for stable tests; the two shades alternate for readability.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for row_nr, row in enumerate(rows[1:]):
        if int(row[4]):
            key = "regression"
        elif int(row[5]):
            key = "progression"
        else:
            key = "normal"
        tr = ET.SubElement(
            dashboard, "tr", attrib=dict(bgcolor=colors[key][row_nr % 2]))

        for col, value in enumerate(row):
            td = ET.SubElement(
                tr, "td", attrib=dict(align="left" if col == 0 else "center"))
            if col == 0:
                # The test name links to its trending graph.
                link = ET.SubElement(
                    td, "a",
                    attrib=dict(href=_generate_url("../trending/", value)))
                link.text = value
            else:
                td.text = value

    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
657
658
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Counts, per test, the failures within the last "window" builds and
    records when/where the test last failed; writes CSV and pretty text.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            tst_data["name"])
                    tbl_dict[tst_name] = {"name": name,
                                          "data": OrderedDict()}
                try:
                    # Per-build record:
                    # (status, generation time, VPP version, CSIT build id).
                    tbl_dict[tst_name]["data"][build] = (
                        tst_data["status"],
                        input_data.metadata(job, build).get("generated", ""),
                        input_data.metadata(job, build).get("version", ""),
                        build)
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_data in tbl_dict.values():
        # Count failures only within the last "window" builds.
        win_size = min(len(tst_data["data"]), table["window"])
        fails_nr = 0
        for val in tst_data["data"].values()[-win_size:]:
            if val[0] == "FAIL":
                fails_nr += 1
                # Keep overwriting so the most recent failure wins.
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    # Pre-sort by last-failure time string; assumes its lexical order
    # matches chronological order — TODO confirm the metadata format.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    # Then group by failure count, highest first.
    tbl_sorted = list()
    for nrf in range(table["window"], -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
740
741
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the csv produced by table_failed_tests.
    try:
        with open(table["input-file"], 'rb') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row on a blue background; the first column is left-aligned.
    header_row = ET.SubElement(failed_tests, "tr",
                               attrib=dict(bgcolor="#7eade7"))
    for col, title in enumerate(rows[0]):
        cell = ET.SubElement(
            header_row, "th",
            attrib=dict(align="left" if col == 0 else "center"))
        cell.text = title

    # Data rows with two alternating background shades for readability.
    shades = ("#e9f1fb", "#d4e4f7")
    for row_nr, row in enumerate(rows[1:]):
        tr = ET.SubElement(failed_tests, "tr",
                           attrib=dict(bgcolor=shades[row_nr % 2]))

        for col, value in enumerate(row):
            td = ET.SubElement(
                tr, "td", attrib=dict(align="left" if col == 0 else "center"))
            if col == 0:
                # The test name links to its trending graph.
                link = ET.SubElement(
                    td, "a",
                    attrib=dict(href=_generate_url("../trending/", value)))
                link.text = value
            else:
                td.text = value

    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return