CSIT-1262: Add 2n/3n-skx to trending
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26
27 from utils import mean, stdev, relative_change, classify_anomalies, \
28     convert_csv_to_pretty_txt
29
30
31 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
32
33
def generate_tables(spec, data):
    """Generate every table requested by the specification file.

    Each table entry names the algorithm (a function defined in this
    module) which renders it; the algorithm is resolved by name and
    invoked with the table specification and the input data.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        algorithm = table["algorithm"]
        try:
            # NOTE: eval() trusts the specification file; the algorithm
            # string must never come from an untrusted source.
            generator = eval(algorithm)
            generator(table, data)
        except NameError as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=algorithm, err=repr(err)))
    logging.info("Done.")
51
52
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Down-select the input data according to the table specification.
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Header row: every column title, CSV-quoted.
    header = ['"{0}"'.format(str(column["title"]).replace('"', '""'))
              for column in table["columns"]]

    # The detailed tables are generated from a single (job, build) pair.
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for _, suite in suites.iteritems():
        # Collect one row per test belonging to this suite.
        suite_name = suite["name"]
        rows = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] not in suite_name:
                continue
            row = list()
            for column in table["columns"]:
                try:
                    data_key = column["data"].split(" ")[1]
                    cell = str(data[job][build][test][data_key]).\
                        replace('"', '""')
                    if data_key in ("vat-history", "show-run"):
                        # Drop the first line break and wrap the text in
                        # preformatted-block markers.
                        cell = cell.replace(" |br| ", "", 1)
                        cell = " |prein| {0} |preout| ".format(cell[:-5])
                    row.append('"{0}"'.format(cell))
                except KeyError:
                    row.append("No data")
            rows.append(row)

        # One CSV file per suite, only when the suite produced any rows.
        if rows:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for row in rows:
                    file_handler.write(",".join(row) + "\n")

    logging.info("  Done.")
119
120
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Down-select the test data and merge it across builds.
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Header row: every column title, CSV-quoted.
    header = ['"{0}"'.format(str(column["title"]).replace('"', '""'))
              for column in table["columns"]]

    for _, suite in suites.iteritems():
        # Collect one row per test belonging to this suite.
        suite_name = suite["name"]
        rows = list()
        for test in data.keys():
            if data[test]["parent"] not in suite_name:
                continue
            row = list()
            for column in table["columns"]:
                try:
                    data_key = column["data"].split(" ")[1]
                    cell = str(data[test][data_key]).replace('"', '""')
                    if data_key in ("vat-history", "show-run"):
                        # Drop the first line break and wrap the text in
                        # preformatted-block markers.
                        cell = cell.replace(" |br| ", "", 1)
                        cell = " |prein| {0} |preout| ".format(cell[:-5])
                    row.append('"{0}"'.format(cell))
                except KeyError:
                    row.append("No data")
            rows.append(row)

        # One CSV file per suite, only when the suite produced any rows.
        if rows:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for row in rows:
                    file_handler.write(",".join(row) + "\n")

    logging.info("  Done.")
184
185
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Compares the mean throughput (receive rate when "include-tests" is MRR)
    of the "reference" builds against the "compare" builds, optionally with
    extra "history" column pairs, and reports the relative change in percent.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables: a (value, stdev) column pair per
    # history item, then reference, then compare, then the delta column.
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    # Collect the reference samples.  Test names are normalized: the
    # rate-type suffix (-ndrpdr, -pdrdisc, ...) is stripped and the
    # thread/core token is reduced (e.g. "2t1c" -> "1c") so that results
    # from testbeds with different threading schemes land in the same row.
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Display name: first token of the parent suite plus
                    # the test name without its last ("-xyz" type) token.
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[:-1]))
                    if "comparison across testbeds" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["ref-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except TypeError:
                    pass  # No data in output.xml for this test

    # Collect the compare samples; same name normalization as above.
    # NOTE(review): unlike the reference loop, a TypeError here removes the
    # whole row from the table — presumably intentional, so that a test
    # with unusable compare data is not reported at all.
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                    replace("-ndrdisc", "").replace("-pdr", ""). \
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["cmp-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except KeyError:
                    pass
                except TypeError:
                    tbl_dict.pop(tst_name_mod, None)
    # Collect the optional history samples, keyed by the history item
    # title; tests not already present in tbl_dict are skipped.
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                            replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                            replace("-ndrdisc", "").replace("-pdr", ""). \
                            replace("-ndr", "").\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][item["title"
                                ]].append(tst_data["result"]["receive-rate"].
                                          avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "PDR"]["LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "NDR"]["LOWER"])
                            else:
                                continue
                        except (TypeError, KeyError):
                            pass

    # Assemble the rows: samples are converted to Mpps means and standard
    # deviations (input assumed to be in pps — TODO confirm against the
    # parser).  Rows missing any expected column end up shorter than the
    # header and are silently dropped by the length check below.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # Delta is computed only when both reference (item[-4]) and compare
        # (item[-2]) means exist and reference is non-zero.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
404
405
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    For every test the trend, the short- and long-term relative changes and
    the number of regressions/progressions within the short-term window are
    computed from the per-build receive rates.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    # One entry per test; "data" maps build number -> the receive-rate
    # result object (opaque here; consumed by classify_anomalies below).
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Prefix the display name with the NIC extracted from
                    # the parent suite; tests whose parent does not match
                    # REGEX_NIC are skipped entirely.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        # At least two samples are needed to talk about a trend.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        # Maximum trend average inside the long-term window that precedes
        # the short-term window; nan when that slice holds no valid value.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: newest trend value vs the value one window ago.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        # Long-term change: newest trend value vs the long-term maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Rows with no computable change at all are not reported.
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 '-' if isnan(last_avg) else
                 round(last_avg / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Final ordering: most regressions first, then most progressions,
    # groups internally sorted by the short-term change.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
521
522
523 def _generate_url(base, testbed, test_name):
524     """Generate URL to a trending plot from the name of the test case.
525
526     :param base: The base part of URL common to all test cases.
527     :param testbed: The testbed used for testing.
528     :param test_name: The name of the test case.
529     :type base: str
530     :type testbed: str
531     :type test_name: str
532     :returns: The URL to the plot with the trending data for the given test
533         case.
534     :rtype str
535     """
536
537     url = base
538     file_name = ""
539     anchor = ".html#"
540     feature = ""
541
542     if "lbdpdk" in test_name or "lbvpp" in test_name:
543         file_name = "link_bonding"
544
545     elif "testpmd" in test_name or "l3fwd" in test_name:
546         file_name = "dpdk"
547
548     elif "memif" in test_name:
549         file_name = "container_memif"
550         feature = "-base"
551
552     elif "srv6" in test_name:
553         file_name = "srv6"
554
555     elif "vhost" in test_name:
556         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
557             file_name = "vm_vhost_l2"
558         elif "ip4base" in test_name:
559             file_name = "vm_vhost_ip4"
560         feature = "-base"
561
562     elif "ipsec" in test_name:
563         file_name = "ipsec"
564         feature = "-base-scale"
565
566     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
567         file_name = "ip4_tunnels"
568         feature = "-base"
569
570     elif "ip4base" in test_name or "ip4scale" in test_name:
571         file_name = "ip4"
572         if "xl710" in test_name:
573             feature = "-base-scale-features"
574         elif "acl" in test_name or "snat" in test_name or "cop" in test_name:
575             feature = "-features"
576         else:
577             feature = "-base-scale"
578
579     elif "ip6base" in test_name or "ip6scale" in test_name:
580         file_name = "ip6"
581         feature = "-base-scale"
582
583     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
584             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
585             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
586         file_name = "l2"
587         if "acl" in test_name:
588             feature = "-features"
589         else:
590             feature = "-base-scale"
591
592     if "x520" in test_name:
593         nic = "x520-"
594     elif "x710" in test_name:
595         nic = "x710-"
596     elif "xl710" in test_name:
597         nic = "xl710-"
598     elif "xxv710" in test_name:
599         nic = "xxv710-"
600     else:
601         nic = ""
602     anchor += nic
603
604     if "64b" in test_name:
605         framesize = "64b"
606     elif "78b" in test_name:
607         framesize = "78b"
608     elif "imix" in test_name:
609         framesize = "imix"
610     elif "9000b" in test_name:
611         framesize = "9000b"
612     elif "1518b" in test_name:
613         framesize = "1518b"
614     elif "114b" in test_name:
615         framesize = "114b"
616     else:
617         framesize = ""
618     anchor += framesize + '-'
619
620     if "1t1c" in test_name:
621         anchor += "1t1c"
622     elif "2t2c" in test_name:
623         anchor += "2t2c"
624     elif "4t4c" in test_name:
625         anchor += "4t4c"
626     elif "2t1c" in test_name:
627         anchor += "2t1c"
628     elif "4t2c" in test_name:
629         anchor += "4t2c"
630     elif "8t4c" in test_name:
631         anchor += "8t4c"
632
633     return url + file_name + '-' + testbed + '-' + nic + framesize + feature + \
634            anchor + feature
635
636
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read back the dashboard data produced earlier as a CSV file.
    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header (the first CSV row):
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, title in enumerate(csv_lst[0]):
        th = ET.SubElement(
            tr, "th", attrib=dict(align="left" if idx == 0 else "center"))
        th.text = title

    # Data rows: the background colour alternates per row and its palette
    # depends on whether the row reports regressions or progressions.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        else:
            color = "normal"
        tr = ET.SubElement(
            dashboard, "tr", attrib=dict(bgcolor=colors[color][r_idx % 2]))

        # Columns:
        for c_idx, item in enumerate(row):
            td = ET.SubElement(
                tr, "td",
                attrib=dict(align="left" if c_idx == 0 else "center"))
            if c_idx == 0:
                # The test name becomes a link to its trending plot.
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item

    # Emit the table as an rST raw-html block.
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
713
714
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    For every test that failed at least once within the last "window"
    builds, one CSV row is written with: NIC-prefixed test name, number of
    failures, and the date, VPP build and CSIT job build of the most recent
    failure. A pretty-printed TXT copy of the CSV is written as well.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                # First time this test is seen: prefix its display name with
                # the NIC type parsed from the parent suite name. Tests whose
                # parent does not match REGEX_NIC are skipped entirely.
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                # Record (status, generated-date, vpp-version, build-id) per
                # build; OrderedDict keeps the builds in insertion order.
                try:
                    tbl_dict[tst_name]["data"][build] = (
                        tst_data["status"],
                        input_data.metadata(job, build).get("generated", ""),
                        input_data.metadata(job, build).get("version", ""),
                        build)
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_data in tbl_dict.values():
        # Count failures in the last win_size recorded builds. Python 2:
        # OrderedDict.values() returns a list, so it can be sliced directly.
        # NOTE(review): assumes builds were inserted oldest-first, so the
        # slice keeps the newest win_size entries — verify against the
        # "data" section of the specification.
        win_size = min(len(tst_data["data"]), table["window"])
        fails_nr = 0
        for val in tst_data["data"].values()[-win_size:]:
            if val[0] == "FAIL":
                fails_nr += 1
                # Keep overwriting, so after the loop these hold the data of
                # the most recent failure in the window.
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    # Primary order: failure count descending; secondary order (preserved by
    # the stable grouping below): last-failure date, newest first.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(table["window"], -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    # Also emit a human-readable TXT rendering of the same CSV.
    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
799
800
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads a previously generated CSV table and renders it as an HTML table
    wrapped in an ".. raw:: html" rST directive. The first column of every
    data row is turned into a hyperlink built by _generate_url.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read all rows of the source CSV; bail out when the input file is
    # missing from the specification or cannot be parsed.
    try:
        with open(table["input-file"], 'rb') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Root element of the generated HTML table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row (first CSV row):
    hdr_row = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for col_nr, caption in enumerate(rows[0]):
        hdr_cell = ET.SubElement(
            hdr_row, "th",
            attrib=dict(align="left" if col_nr == 0 else "center"))
        hdr_cell.text = caption

    # Data rows, with alternating background colors:
    colors = ("#e9f1fb", "#d4e4f7")
    for row_nr, row in enumerate(rows[1:]):
        data_row = ET.SubElement(
            failed_tests, "tr", attrib=dict(bgcolor=colors[row_nr % 2]))
        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                data_row, "td",
                attrib=dict(align="left" if col_nr == 0 else "center"))
            if col_nr == 0:
                # First column: wrap the test name in a link.
                url = _generate_url("../trending/", testbed, value)
                link = ET.SubElement(cell, "a", attrib=dict(href=url))
                link.text = value
            else:
                cell.text = value

    # Serialize the table into the output file as a raw-HTML rST block:
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return