CSIT-1270: Split feature tests in trending
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26
27 from utils import mean, stdev, relative_change, classify_anomalies, \
28     convert_csv_to_pretty_txt
29
30
# Matches a NIC designation embedded in a suite name, e.g. "10ge2p1x520"
# (speed, "ge", port count, "p", slot, NIC model). Used to recover the NIC
# from a test's parent suite when building display names.
REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
32
33
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table specification names its generating algorithm, which must be
    a function defined in this module.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        # Look the algorithm up by name instead of eval()-ing the string
        # from the specification, so an arbitrary expression placed in the
        # specification file cannot be executed. Errors raised inside a
        # generator function propagate instead of being mistaken for a
        # missing algorithm.
        generator = globals().get(table["algorithm"])
        if generator is None:
            logging.error("Probably algorithm '{alg}' is not defined.".
                          format(alg=table["algorithm"]))
            continue
        generator(table, data)
    logging.info("Done.")
51
52
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    One CSV file is written per suite that contains matching tests.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables; embedded quotes are CSV-escaped.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification. Only the first build of the first job is used.
    # next(iter(...)) works on both Python 2 and 3, unlike .keys()[0].
    job = next(iter(table["data"]))
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for _, suite in suites.items():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # Drop the first line-break marker and present
                            # the rest as pre-formatted text; the final five
                            # characters (a trailing marker) are cut off.
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
119
120
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Like table_details, but the data from all builds is merged first.
    One CSV file is written per suite that contains matching tests.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; embedded quotes are CSV-escaped.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.items():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # Drop the first line-break marker and present
                            # the rest as pre-formatted text; the final five
                            # characters (a trailing marker) are cut off.
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
184
185
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Collects reference, compare and (optionally) historical rates per test,
    computes mean/stdev [Mpps] and the relative change, and writes a CSV
    plus a pretty-printed TXT version of the table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    def _mod_test_name(tst_name):
        # Strip the test-type suffix and normalise the thread/core tag so
        # the same test matches across reference, compare and history data.
        return tst_name.replace("-ndrpdrdisc", "").\
            replace("-ndrpdr", "").replace("-pdrdisc", "").\
            replace("-ndrdisc", "").replace("-pdr", "").\
            replace("-ndr", "").\
            replace("1t1c", "1c").replace("2t1c", "1c").\
            replace("2t2c", "2c").replace("4t2c", "2c").\
            replace("4t4c", "4c").replace("8t4c", "4c")

    def _test_rate(tst_data):
        # Return the measured rate for the configured "include-tests" view,
        # or None when the test type is not relevant for that view.
        # KeyError / TypeError from missing data propagate to the caller.
        # TODO: Re-work when NDRPDRDISC tests are not used
        if table["include-tests"] == "MRR":
            return tst_data["result"]["receive-rate"].avg
        elif table["include-tests"] == "PDR":
            if tst_data["type"] == "PDR":
                return tst_data["throughput"]["value"]
            elif tst_data["type"] == "NDRPDR":
                return tst_data["throughput"]["PDR"]["LOWER"]
        elif table["include-tests"] == "NDR":
            if tst_data["type"] == "NDR":
                return tst_data["throughput"]["value"]
            elif tst_data["type"] == "NDRPDR":
                return tst_data["throughput"]["NDR"]["LOWER"]
        return None

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _mod_test_name(tst_name)
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[:-1]))
                    if "comparison across testbeds" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    rate = _test_rate(tst_data)
                    if rate is not None:
                        tbl_dict[tst_name_mod]["ref-data"].append(rate)
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _mod_test_name(tst_name)
                try:
                    rate = _test_rate(tst_data)
                    if rate is not None:
                        tbl_dict[tst_name_mod]["cmp-data"].append(rate)
                except KeyError:
                    pass
                except TypeError:
                    # Partial data would skew the comparison; drop the test.
                    tbl_dict.pop(tst_name_mod, None)
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].items():
                        tst_name_mod = _mod_test_name(tst_name)
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(
                                item["title"], None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            rate = _test_rate(tst_data)
                            if rate is not None:
                                tbl_dict[tst_name_mod]["history"][
                                    item["title"]].append(rate)
                        except (TypeError, KeyError):
                            pass

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Rows without a computed delta do not match the header width and
        # are intentionally dropped here.
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
404
405
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    For every test with at least two samples the trend, the short- and
    long-term relative change and the counts of detected regressions and
    progressions are computed and written as CSV and pretty TXT.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Collect the measured receive rates per test, keyed by build:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # The NIC is part of the parent suite name, not of the
                    # test name; prepend it to make the display name unique.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name]["data"]
        if len(data_t) < 2:
            # At least two samples are needed to compute a trend.
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # The long-term window contains no valid samples.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 '-' if isnan(last_avg) else
                 round(last_avg / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order rows by number of regressions (desc), then progressions (desc),
    # then by the short-term change within each group.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
521
522
523 def _generate_url(base, testbed, test_name):
524     """Generate URL to a trending plot from the name of the test case.
525
526     :param base: The base part of URL common to all test cases.
527     :param testbed: The testbed used for testing.
528     :param test_name: The name of the test case.
529     :type base: str
530     :type testbed: str
531     :type test_name: str
532     :returns: The URL to the plot with the trending data for the given test
533         case.
534     :rtype str
535     """
536
537     url = base
538     file_name = ""
539     anchor = ".html#"
540     feature = ""
541
542     if "lbdpdk" in test_name or "lbvpp" in test_name:
543         file_name = "link_bonding"
544
545     elif "testpmd" in test_name or "l3fwd" in test_name:
546         file_name = "dpdk"
547
548     elif "memif" in test_name:
549         file_name = "container_memif"
550         feature = "-base"
551
552     elif "srv6" in test_name:
553         file_name = "srv6"
554
555     elif "vhost" in test_name:
556         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
557             file_name = "vm_vhost_l2"
558             if "l2xcbase" in test_name:
559                 feature = "-base-l2xc"
560             elif "l2bdbasemaclrn" in test_name:
561                 feature = "-base-l2bd"
562             else:
563                 feature = "-base"
564         elif "ip4base" in test_name:
565             file_name = "vm_vhost_ip4"
566             feature = "-base"
567
568     elif "ipsec" in test_name:
569         file_name = "ipsec"
570         feature = "-base-scale"
571
572     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
573         file_name = "ip4_tunnels"
574         feature = "-base"
575
576     elif "ip4base" in test_name or "ip4scale" in test_name:
577         file_name = "ip4"
578         if "xl710" in test_name:
579             feature = "-base-scale-features"
580         elif "iacl" in test_name:
581             feature = "-features-iacl"
582         elif "oacl" in test_name:
583             feature = "-features-oacl"
584         elif "snat" in test_name or "cop" in test_name:
585             feature = "-features"
586         else:
587             feature = "-base-scale"
588
589     elif "ip6base" in test_name or "ip6scale" in test_name:
590         file_name = "ip6"
591         feature = "-base-scale"
592
593     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
594             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
595             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
596         file_name = "l2"
597         if "macip" in test_name:
598             feature = "-features-macip"
599         elif "iacl" in test_name:
600             feature = "-features-iacl"
601         elif "oacl" in test_name:
602             feature = "-features-oacl"
603         else:
604             feature = "-base-scale"
605
606     if "x520" in test_name:
607         nic = "x520-"
608     elif "x710" in test_name:
609         nic = "x710-"
610     elif "xl710" in test_name:
611         nic = "xl710-"
612     elif "xxv710" in test_name:
613         nic = "xxv710-"
614     else:
615         nic = ""
616     anchor += nic
617
618     if "64b" in test_name:
619         framesize = "64b"
620     elif "78b" in test_name:
621         framesize = "78b"
622     elif "imix" in test_name:
623         framesize = "imix"
624     elif "9000b" in test_name:
625         framesize = "9000b"
626     elif "1518b" in test_name:
627         framesize = "1518b"
628     elif "114b" in test_name:
629         framesize = "114b"
630     else:
631         framesize = ""
632     anchor += framesize + '-'
633
634     if "1t1c" in test_name:
635         anchor += "1t1c"
636     elif "2t2c" in test_name:
637         anchor += "2t2c"
638     elif "4t4c" in test_name:
639         anchor += "4t4c"
640     elif "2t1c" in test_name:
641         anchor += "2t1c"
642     elif "4t2c" in test_name:
643         anchor += "4t2c"
644     elif "8t4c" in test_name:
645         anchor += "8t4c"
646
647     return url + file_name + '-' + testbed + '-' + nic + framesize + feature + \
648            anchor + feature
649
650
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the CSV dashboard produced by table_performance_trending_dashboard
    and renders it as an HTML table embedded in reStructuredText.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    if not csv_lst:
        # An empty input file would otherwise crash on csv_lst[0] below.
        logging.warning("No data in the file '{0}'.".
                        format(table["input-file"]))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header (first CSV row):
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows: the background colour signals regression / progression / normal,
    # alternating a lighter and a darker shade of each.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        else:
            color = "normal"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # The test name in the first column links to its trending plot:
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
727
728
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Header row of the generated CSV table:
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Collect, per test, an ordered history of
    # (status, generated-date, vpp-version, build) tuples keyed by build.
    tests = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tests.get(tst_name, None) is None:
                    # First occurrence: derive the NIC prefix from the parent
                    # suite name; skip tests with no recognizable NIC.
                    nic_match = re.search(REGEX_NIC, tst_data["parent"])
                    if not nic_match:
                        continue
                    tests[tst_name] = {
                        "name": "{0}-{1}".format(nic_match.group(0),
                                                 tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tests[tst_name]["data"][build] = (
                        tst_data["status"],
                        input_data.metadata(job, build).get("generated", ""),
                        input_data.metadata(job, build).get("version", ""),
                        build)
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    # Count failures within the sliding window of most recent builds and
    # remember the details of the latest failure.
    rows = list()
    for tst_data in tests.values():
        win_size = min(len(tst_data["data"]), table["window"])
        fail_count = 0
        last_date = last_vpp = last_csit = None
        for status, date, vpp, csit in tst_data["data"].values()[-win_size:]:
            if status == "FAIL":
                fail_count += 1
                last_date = date
                last_vpp = vpp
                last_csit = csit
        if fail_count:
            rows.append([tst_data["name"],
                         fail_count,
                         last_date,
                         last_vpp,
                         "mrr-daily-build-{0}".format(last_csit)])

    # Order: primarily by number of failures (descending), secondarily by the
    # last-failure date (descending). Two consecutive stable sorts produce
    # exactly the same ordering as sorting by date and then grouping rows by
    # failure count from the window size down to zero.
    rows.sort(key=lambda row: row[2], reverse=True)
    rows.sort(key=lambda row: row[1], reverse=True)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for row in rows:
            file_handler.write(",".join([str(item) for item in row]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
813
814
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV produced by table_failed_tests and renders it as an HTML
    table embedded in a reStructuredText ".. raw:: html" directive, with the
    test-name column linked to the trending pages.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # The testbed name is required to build the trending URLs.
    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # An empty input file would crash on csv_lst[0] below; bail out early.
    if not csv_lst:
        logging.warning("The input file '{0}' is empty.".
                        format(table["input-file"]))
        return

    # Table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header (first CSV row):
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows (remaining CSV rows), with alternating background colors:
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name: the first column is rendered as a link to trending.
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return