CSIT-1269: Add VTS tests to trending
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26
27 from utils import mean, stdev, relative_change, classify_anomalies, \
28     convert_csv_to_pretty_txt
29
30
# Extracts the NIC designation from a suite name, e.g. "10ge2p1x520"
# (<speed>ge<nr of ports>p<port#><nic model>) — presumably all CSIT suite
# names embed the NIC this way; TODO confirm against suite naming rules.
REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
32
33
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table specification names its generating algorithm; the algorithm
    is a function defined in this module and is looked up by name.  A table
    whose algorithm cannot be resolved (or which fails with a name/key
    error) is logged and skipped so the remaining tables are still
    generated.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        try:
            # Look the algorithm up by name instead of eval()-ing a string
            # coming from the specification file: the specification is
            # external input and must not be executed as an arbitrary
            # expression.
            globals()[table["algorithm"]](table, data)
        except (KeyError, NameError) as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
    logging.info("Done.")
51
52
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    One csv file is written per suite which contains matching tests; the
    file name is "<output-file>_<suite name><output-file-ext>".  Only the
    first build of the first job in the "data" item is used.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables; titles are csv-quoted.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification.  list() instead of .keys()[0] keeps this valid on both
    # Python 2 and Python 3.
    job = list(table["data"])[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for _, suite in suites.items():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build]:
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # The "data" item is e.g. "data name"; its second
                        # word is the key into the test data.
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # Drop the first " |br| " separator and the
                            # trailing 5 characters (presumably a trailing
                            # "|br| " marker - TODO confirm), then wrap the
                            # text in |prein|/|preout| markers.
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file; suites without matching tests produce no
        # file at all.
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
119
120
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Unlike table_details, the filtered data from all jobs/builds is merged
    into one data set first; one csv file is then written per suite which
    contains matching tests.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; titles are csv-quoted.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # .items() instead of the Python-2-only .iteritems(); it also survives
    # pandas >= 2.0 where Series.iteritems was removed.
    for _, suite in suites.items():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # The "data" item is e.g. "data name"; its second
                        # word is the key into the test data.
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # Drop the first " |br| " separator and the
                            # trailing 5 characters (presumably a trailing
                            # "|br| " marker - TODO confirm), then wrap the
                            # text in |prein|/|preout| markers.
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file; suites without matching tests produce no
        # file at all.
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
184
185
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Compares the "reference" data set with the "compare" data set (and,
    optionally, with older "history" data sets): for every test the mean
    and standard deviation of the collected throughput/receive-rate samples
    are tabulated together with the relative change between reference and
    compare means.  The result is written as a csv file and converted to a
    pretty txt table.

    NOTE(review): uses dict.iteritems() - this module is Python 2 only.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        # MRR tests report a receive rate; the other test types report a
        # throughput value.
        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        # Optional older data sets shown before the reference/compare
        # columns.
        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    # tbl_dict maps a normalized test name (test-type suffix stripped,
    # "NtMc" thread/core tags collapsed to "Mc") to the display name and
    # the lists of collected reference and compare samples.
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Display name: first component of the parent suite name
                    # plus the test name without its last (test-type)
                    # component.
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[:-1]))
                    if "comparison across testbeds" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["ref-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except TypeError:
                    pass  # No data in output.xml for this test

    # Collect the "compare" samples.  Only tests already present in
    # tbl_dict get samples: a missing entry raises KeyError below, which is
    # deliberately swallowed.
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                    replace("-ndrdisc", "").replace("-pdr", ""). \
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["cmp-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except KeyError:
                    # Test not present in the reference data set - skip it.
                    pass
                except TypeError:
                    # Unusable compare data - drop the whole test from the
                    # table.
                    tbl_dict.pop(tst_name_mod, None)
    # Optionally collect the "history" samples, stored per history title in
    # an OrderedDict so the column order follows the specification.  Tests
    # not present in the reference data set are skipped.
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                            replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                            replace("-ndrdisc", "").replace("-pdr", ""). \
                            replace("-ndr", "").\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][item["title"
                                ]].append(tst_data["result"]["receive-rate"].
                                          avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "PDR"]["LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "NDR"]["LOWER"])
                            else:
                                continue
                        except (TypeError, KeyError):
                            pass

    # Assemble the rows: [name, (history mean/stdev)*, ref mean, ref stdev,
    # cmp mean, cmp stdev, delta].  Rates are converted to Mpps.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # Delta only when both means exist and the reference mean is
        # non-zero (guards the division inside relative_change - presumably
        # a percentage change; see utils.relative_change).
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Rows without a computed delta are one column short and are
        # dropped here.
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
404
405
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    The collected samples of every test are classified (regression /
    progression) and the short- and long-term changes of the trend are
    computed.  The result is written as a csv file and as a pretty txt
    table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Collect the samples of every (not ignored) test, keyed by build
    # number.
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    nic_match = re.search(REGEX_NIC, tst_data["parent"])
                    if not nic_match:
                        continue
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic_match.group(0),
                                                 tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        samples = tbl_dict[tst_name]["data"]
        if len(samples) < 2:
            # Not enough samples to evaluate a trend.
            continue

        classification_lst, avgs = classify_anomalies(samples)

        win_size = min(len(samples), table["window"])
        long_win_size = min(len(samples), table["long-trend-window"])

        # Highest valid trend value seen in the long-term window before the
        # short-term window starts; nan when there is none.
        long_term = [avg for avg in avgs[-long_win_size:-win_size]
                     if not isnan(avg)]
        max_long_avg = max(long_term) if long_term else nan

        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            recent = classification_lst[-win_size:]
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 '-' if isnan(last_avg) else
                 round(last_avg / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
                 recent.count("regression"),
                 recent.count("progression")])

    # Alphabetical pre-sort by test name ...
    tbl_lst.sort(key=lambda rel: rel[0])

    # ... then order by number of regressions (descending), within a group
    # by number of progressions (descending), and inside that by the
    # short-term change (ascending).
    tbl_sorted = list()
    for num_regr in range(table["window"], -1, -1):
        same_regr = [row for row in tbl_lst if row[4] == num_regr]
        for num_progr in range(table["window"], -1, -1):
            group = [row for row in same_regr if row[5] == num_progr]
            group.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(group)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
521
522
523 def _generate_url(base, testbed, test_name):
524     """Generate URL to a trending plot from the name of the test case.
525
526     :param base: The base part of URL common to all test cases.
527     :param testbed: The testbed used for testing.
528     :param test_name: The name of the test case.
529     :type base: str
530     :type testbed: str
531     :type test_name: str
532     :returns: The URL to the plot with the trending data for the given test
533         case.
534     :rtype str
535     """
536
537     url = base
538     file_name = ""
539     anchor = ".html#"
540     feature = ""
541
542     if "lbdpdk" in test_name or "lbvpp" in test_name:
543         file_name = "link_bonding"
544
545     elif "114b" in test_name and "vhost" in test_name:
546         file_name = "vts"
547
548     elif "testpmd" in test_name or "l3fwd" in test_name:
549         file_name = "dpdk"
550
551     elif "memif" in test_name:
552         file_name = "container_memif"
553         feature = "-base"
554
555     elif "srv6" in test_name:
556         file_name = "srv6"
557
558     elif "vhost" in test_name:
559         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
560             file_name = "vm_vhost_l2"
561             if "114b" in test_name:
562                 feature = ""
563             elif "l2xcbase" in test_name:
564                 feature = "-base-l2xc"
565             elif "l2bdbasemaclrn" in test_name:
566                 feature = "-base-l2bd"
567             else:
568                 feature = "-base"
569         elif "ip4base" in test_name:
570             file_name = "vm_vhost_ip4"
571             feature = "-base"
572
573     elif "ipsec" in test_name:
574         file_name = "ipsec"
575         feature = "-base-scale"
576
577     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
578         file_name = "ip4_tunnels"
579         feature = "-base"
580
581     elif "ip4base" in test_name or "ip4scale" in test_name:
582         file_name = "ip4"
583         if "xl710" in test_name:
584             feature = "-base-scale-features"
585         elif "iacl" in test_name:
586             feature = "-features-iacl"
587         elif "oacl" in test_name:
588             feature = "-features-oacl"
589         elif "snat" in test_name or "cop" in test_name:
590             feature = "-features"
591         else:
592             feature = "-base-scale"
593
594     elif "ip6base" in test_name or "ip6scale" in test_name:
595         file_name = "ip6"
596         feature = "-base-scale"
597
598     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
599             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
600             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
601         file_name = "l2"
602         if "macip" in test_name:
603             feature = "-features-macip"
604         elif "iacl" in test_name:
605             feature = "-features-iacl"
606         elif "oacl" in test_name:
607             feature = "-features-oacl"
608         else:
609             feature = "-base-scale"
610
611     if "x520" in test_name:
612         nic = "x520-"
613     elif "x710" in test_name:
614         nic = "x710-"
615     elif "xl710" in test_name:
616         nic = "xl710-"
617     elif "xxv710" in test_name:
618         nic = "xxv710-"
619     else:
620         nic = ""
621     anchor += nic
622
623     if "64b" in test_name:
624         framesize = "64b"
625     elif "78b" in test_name:
626         framesize = "78b"
627     elif "imix" in test_name:
628         framesize = "imix"
629     elif "9000b" in test_name:
630         framesize = "9000b"
631     elif "1518b" in test_name:
632         framesize = "1518b"
633     elif "114b" in test_name:
634         framesize = "114b"
635     else:
636         framesize = ""
637     anchor += framesize + '-'
638
639     if "1t1c" in test_name:
640         anchor += "1t1c"
641     elif "2t2c" in test_name:
642         anchor += "2t2c"
643     elif "4t4c" in test_name:
644         anchor += "4t4c"
645     elif "2t1c" in test_name:
646         anchor += "2t1c"
647     elif "4t2c" in test_name:
648         anchor += "4t2c"
649     elif "8t4c" in test_name:
650         anchor += "8t4c"
651
652     return url + file_name + '-' + testbed + '-' + nic + framesize + feature + \
653            anchor + feature
654
655
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    The csv produced by the dashboard table is converted to an HTML table
    where the first column links each test to its trending plot and the row
    background colour signals the regression/progression state.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row:
    header_row = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, title in enumerate(csv_lst[0]):
        cell = ET.SubElement(
            header_row, "th",
            attrib=dict(align="left" if idx == 0 else "center"))
        cell.text = title

    # Data rows; the two shades per state alternate between rows.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        if int(row[4]):
            state = "regression"
        elif int(row[5]):
            state = "progression"
        else:
            state = "normal"
        tr = ET.SubElement(
            dashboard, "tr", attrib=dict(bgcolor=colors[state][r_idx % 2]))

        # Columns:
        for c_idx, item in enumerate(row):
            cell = ET.SubElement(
                tr, "td",
                attrib=dict(align="left" if c_idx == 0 else "center"))
            if c_idx == 0:
                # First column: link the test name to its trending plot.
                link = ET.SubElement(
                    cell, "a",
                    attrib=dict(href=_generate_url("../trending/", testbed,
                                                   item)))
                link.text = item
            else:
                cell.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
732
733
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Header row of the generated CSV table.
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Collect, per test, its status history over all specified jobs/builds.
    tests = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tests.get(tst_name, None) is None:
                    # Derive a display name prefixed with the NIC model;
                    # skip tests whose parent suite carries no NIC info.
                    nic_match = re.search(REGEX_NIC, tst_data["parent"])
                    if not nic_match:
                        continue
                    tests[tst_name] = {
                        "name": "{0}-{1}".format(nic_match.group(0),
                                                 tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tests[tst_name]["data"][build] = (
                        tst_data["status"],
                        input_data.metadata(job, build).get("generated", ""),
                        input_data.metadata(job, build).get("version", ""),
                        build)
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    # One output row per test which failed at least once within the window
    # of most recent builds.
    rows = list()
    for tst in tests.values():
        win_size = min(len(tst["data"]), table["window"])
        fails_nr = 0
        last_date = last_vpp = last_csit = None
        for status, generated, version, bld in \
                list(tst["data"].values())[-win_size:]:
            if status == "FAIL":
                fails_nr += 1
                last_date = generated
                last_vpp = version
                last_csit = bld
        if fails_nr:
            rows.append([tst["name"],
                         fails_nr,
                         last_date,
                         last_vpp,
                         "mrr-daily-build-{0}".format(last_csit)])

    # Most recent failures first; then group by failure count, descending.
    rows.sort(key=lambda row: row[2], reverse=True)
    ordered = list()
    for cnt in range(table["window"], -1, -1):
        ordered.extend([row for row in rows if row[1] == cnt])

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for row in ordered:
            file_handler.write(",".join([str(item) for item in row]) + '\n')

    # Also emit a pretty-printed text version next to the CSV.
    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
818
819
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV produced by table_failed_tests and renders it as an HTML
    table (wrapped in a reST ".. raw:: html" directive) where each test name
    links to its trending graph.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # The testbed is needed to build the trending URLs.
    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # An empty input file would raise IndexError on the header row below;
    # warn and bail out instead, consistent with the error handling above.
    if not csv_lst:
        logging.warning("No data in the file '{0}'.".
                        format(table["input-file"]))
        return

    # Table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header (the first CSV row):
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows, with alternating background colors:
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # First column is the test name: link it to its trending graph.
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return