a3373db6d8ba4c7bf37fbf6c2772676525210a36
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26
27 from utils import mean, stdev, relative_change, classify_anomalies, \
28     convert_csv_to_pretty_txt
29
30
31 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
32
33
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table entry names its generator algorithm; the named function is
    looked up in this module and called with the table spec and the data.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        # NOTE: eval() resolves the algorithm name from the specification
        # file; the specification is trusted input, do not feed it
        # untrusted content.
        algorithm = table["algorithm"]
        try:
            eval(algorithm)(table, data)
        except NameError as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=algorithm, err=repr(err)))
    logging.info("Done.")
52
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    For each suite of the first build of the first job in the table
    specification, one CSV file is written with a row per test whose
    "parent" matches the suite name.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables: CSV-quoted column titles.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification. Only the first build of the first job is used.
    # NOTE: list(...)[0] instead of .keys()[0] keeps this working on both
    # Python 2 and Python 3.
    job = list(table["data"].keys())[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for _, suite in suites.items():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build]:
            if data[job][build][test]["parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table["columns"]:
                try:
                    # The column "data" spec is e.g. "data name": the
                    # second word is the key into the test data.
                    data_key = column["data"].split(" ")[1]
                    col_data = str(data[job][build][test][data_key]).\
                        replace('"', '""')
                    if data_key in ("vat-history", "show-run"):
                        # Drop the first " |br| " separator and wrap the
                        # rest in preformatted-text markers; the last five
                        # characters are trailing markup residue — TODO
                        # confirm against the data format.
                        col_data = col_data.replace(" |br| ", "", 1)
                        col_data = " |prein| {0} |preout| ".\
                            format(col_data[:-5])
                    row_lst.append('"{0}"'.format(col_data))
                except KeyError:
                    # Missing key in the test data (or no "data" spec for
                    # the column) yields a placeholder cell.
                    row_lst.append("No data")
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
120
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Like table_details, but the filtered data from all builds is merged
    first, so one CSV file per suite covers every selected build.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables: CSV-quoted column titles.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.items():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data:
            if data[test]["parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table["columns"]:
                try:
                    # The column "data" spec is e.g. "data name": the
                    # second word is the key into the test data.
                    data_key = column["data"].split(" ")[1]
                    col_data = str(data[test][data_key]).\
                        replace('"', '""')
                    if data_key in ("vat-history", "show-run"):
                        # Drop the first " |br| " separator and wrap the
                        # rest in preformatted-text markers; the last five
                        # characters are trailing markup residue — TODO
                        # confirm against the data format.
                        col_data = col_data.replace(" |br| ", "", 1)
                        col_data = " |prein| {0} |preout| ".\
                            format(col_data[:-5])
                    row_lst.append('"{0}"'.format(col_data))
                except KeyError:
                    # Missing key in the test data (or no "data" spec for
                    # the column) yields a placeholder cell.
                    row_lst.append("No data")
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
184
185
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Builds one row per (normalized) test name with mean/stdev of the
    "reference" builds, the "compare" builds, optional "history" columns,
    and the relative change between reference and compare. Writes a CSV
    file and a pretty-printed TXT file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    # tbl_dict maps the normalized test name to a dict with the display
    # name and the collected "ref-data" / "cmp-data" sample lists.
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Normalize the test name: strip the test-type suffix and
                # fold thread/core tags (e.g. "2t1c") down to the core
                # count ("1c") so the same test from different runs shares
                # one key.
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Display name: first segment of the parent suite plus
                    # the test name without its last segment.
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["ref-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except TypeError:
                    pass  # No data in output.xml for this test

    # Collect the "compare" measurements under the same normalized keys.
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                    replace("-ndrdisc", "").replace("-pdr", ""). \
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["cmp-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except KeyError:
                    # No reference entry for this test (or a missing data
                    # key) — skip the sample.
                    pass
                except TypeError:
                    # NOTE(review): malformed compare data drops the whole
                    # row, while the reference loop above only skips the
                    # sample — confirm this asymmetry is intended.
                    tbl_dict.pop(tst_name_mod, None)
    if history:
        # Collect the optional historical columns, keyed by the history
        # item title, only for tests already present in tbl_dict.
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                            replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                            replace("-ndrdisc", "").replace("-pdr", ""). \
                            replace("-ndr", "").\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                        if "across topologies" in table["title"].lower():
                            tst_name_mod = tst_name_mod.replace("2n1l-", "")
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][item["title"
                                ]].append(tst_data["result"]["receive-rate"].
                                          avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "PDR"]["LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "NDR"]["LOWER"])
                            else:
                                continue
                        except (TypeError, KeyError):
                            pass

    # Build the output rows. Values are divided by 1e6 to match the
    # [Mpps] units in the header.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # item[-4] is the reference mean, item[-2] the compare mean; the
        # delta is only appended when both exist and the reference is
        # non-zero. Rows without a delta stay shorter than the header and
        # are dropped by the length check below.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
411
412
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    For every test, classifies the per-build receive rates via
    classify_anomalies and reports the last trend value, the short- and
    long-term relative changes, and the regression/progression counts in
    the short-term window. Writes a CSV file and a pretty-printed TXT file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    # tbl_dict maps the test name to its display name (NIC prefix + test
    # name) and an ordered build -> receive-rate mapping.
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # The NIC type is extracted from the parent suite name;
                    # tests without a recognizable NIC are skipped.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        # At least two samples are needed for a trend.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        # Short-term and long-term windows, clamped to the available data.
        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        try:
            # Best average in the long-term window preceding the
            # short-term window; ValueError means no non-nan sample there.
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last average vs the start of the short-term
        # window, in percent; nan when undefined.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        # Long-term change: last average vs the long-term maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            # Trend value is reported in Mpps; '-' marks missing values.
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 '-' if isnan(last_avg) else
                 round(last_avg / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order the rows: most regressions first, then most progressions,
    # then by short-term change within each group.
    # NOTE(review): the inner sort key (rel[2]) can mix floats and the
    # '-' placeholder string — works under Python 2 ordering only.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
528
529
530 def _generate_url(base, testbed, test_name):
531     """Generate URL to a trending plot from the name of the test case.
532
533     :param base: The base part of URL common to all test cases.
534     :param testbed: The testbed used for testing.
535     :param test_name: The name of the test case.
536     :type base: str
537     :type testbed: str
538     :type test_name: str
539     :returns: The URL to the plot with the trending data for the given test
540         case.
541     :rtype str
542     """
543
544     url = base
545     file_name = ""
546     anchor = ".html#"
547     feature = ""
548
549     if "lbdpdk" in test_name or "lbvpp" in test_name:
550         file_name = "link_bonding"
551
552     elif "114b" in test_name and "vhost" in test_name:
553         file_name = "vts"
554
555     elif "testpmd" in test_name or "l3fwd" in test_name:
556         file_name = "dpdk"
557
558     elif "memif" in test_name:
559         file_name = "container_memif"
560         feature = "-base"
561
562     elif "srv6" in test_name:
563         file_name = "srv6"
564
565     elif "vhost" in test_name:
566         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
567             file_name = "vm_vhost_l2"
568             if "114b" in test_name:
569                 feature = ""
570             elif "l2xcbase" in test_name:
571                 feature = "-base-l2xc"
572             elif "l2bdbasemaclrn" in test_name:
573                 feature = "-base-l2bd"
574             else:
575                 feature = "-base"
576         elif "ip4base" in test_name:
577             file_name = "vm_vhost_ip4"
578             feature = "-base"
579
580     elif "ipsec" in test_name:
581         file_name = "ipsec"
582         feature = "-base-scale"
583
584     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
585         file_name = "ip4_tunnels"
586         feature = "-base"
587
588     elif "ip4base" in test_name or "ip4scale" in test_name:
589         file_name = "ip4"
590         if "xl710" in test_name:
591             feature = "-base-scale-features"
592         elif "iacl" in test_name:
593             feature = "-features-iacl"
594         elif "oacl" in test_name:
595             feature = "-features-oacl"
596         elif "snat" in test_name or "cop" in test_name:
597             feature = "-features"
598         else:
599             feature = "-base-scale"
600
601     elif "ip6base" in test_name or "ip6scale" in test_name:
602         file_name = "ip6"
603         feature = "-base-scale"
604
605     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
606             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
607             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
608         file_name = "l2"
609         if "macip" in test_name:
610             feature = "-features-macip"
611         elif "iacl" in test_name:
612             feature = "-features-iacl"
613         elif "oacl" in test_name:
614             feature = "-features-oacl"
615         else:
616             feature = "-base-scale"
617
618     if "x520" in test_name:
619         nic = "x520-"
620     elif "x710" in test_name:
621         nic = "x710-"
622     elif "xl710" in test_name:
623         nic = "xl710-"
624     elif "xxv710" in test_name:
625         nic = "xxv710-"
626     else:
627         nic = ""
628     anchor += nic
629
630     if "64b" in test_name:
631         framesize = "64b"
632     elif "78b" in test_name:
633         framesize = "78b"
634     elif "imix" in test_name:
635         framesize = "imix"
636     elif "9000b" in test_name:
637         framesize = "9000b"
638     elif "1518b" in test_name:
639         framesize = "1518b"
640     elif "114b" in test_name:
641         framesize = "114b"
642     else:
643         framesize = ""
644     anchor += framesize + '-'
645
646     if "1t1c" in test_name:
647         anchor += "1t1c"
648     elif "2t2c" in test_name:
649         anchor += "2t2c"
650     elif "4t4c" in test_name:
651         anchor += "4t4c"
652     elif "2t1c" in test_name:
653         anchor += "2t1c"
654     elif "4t2c" in test_name:
655         anchor += "4t2c"
656     elif "8t4c" in test_name:
657         anchor += "8t4c"
658
659     return url + file_name + '-' + testbed + '-' + nic + framesize + feature + \
660            anchor + feature
661
662
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the dashboard CSV produced earlier and renders it as an HTML
    table where each test name links to its trending plot and each row is
    colored by its regression/progression state.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the pre-generated dashboard CSV into a list of rows.
    try:
        with open(table["input-file"], 'rb') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    hdr_row = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for col_idx, title in enumerate(rows[0]):
        cell = ET.SubElement(
            hdr_row, "th",
            attrib=dict(align="left" if col_idx == 0 else "center"))
        cell.text = title

    # Rows: alternate two shades of the color matching the row's state
    # (column 4 holds the regression count, column 5 the progressions).
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for row_idx, row in enumerate(rows[1:]):
        if int(row[4]):
            state = "regression"
        elif int(row[5]):
            state = "progression"
        else:
            state = "normal"
        tr = ET.SubElement(
            dashboard, "tr",
            attrib=dict(bgcolor=colors[state][row_idx % 2]))

        # Columns:
        for col_idx, value in enumerate(row):
            td = ET.SubElement(
                tr, "td",
                attrib=dict(align="left" if col_idx == 0 else "center"))
            if col_idx == 0:
                # The first column is the test name: link it to its
                # trending plot.
                ref = ET.SubElement(
                    td, "a",
                    attrib=dict(href=_generate_url("../trending/", testbed,
                                                   value)))
                ref.text = value
            else:
                td.text = value
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
740
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Collect, per test, an ordered history of
    # (status, generated-date, vpp-version, csit-build) tuples keyed by build.
    tests = dict()
    for job, builds in table["data"].items():
        for bld in builds:
            bld = str(bld)
            for tst_name, tst_data in data[job][bld].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tests.get(tst_name, None) is None:
                    # Derive the NIC prefix from the parent suite name;
                    # tests without a recognizable NIC are skipped.
                    found = re.search(REGEX_NIC, tst_data["parent"])
                    if not found:
                        continue
                    tests[tst_name] = {
                        "name": "{0}-{1}".format(found.group(0),
                                                 tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tests[tst_name]["data"][bld] = (
                        tst_data["status"],
                        input_data.metadata(job, bld).get("generated", ""),
                        input_data.metadata(job, bld).get("version", ""),
                        bld)
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    # Count FAILs inside the configured sliding window and remember the
    # metadata of the most recent failure seen there.
    tbl_lst = list()
    for tst_data in tests.values():
        win_size = min(len(tst_data["data"]), table["window"])
        fails_nr = 0
        for status, date, vpp, csit in tst_data["data"].values()[-win_size:]:
            if status == "FAIL":
                fails_nr += 1
                last_date, last_vpp, last_csit = date, vpp, csit
        if fails_nr:
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            last_date,
                            last_vpp,
                            "mrr-daily-build-{0}".format(last_csit)])

    # Newest failures first, then grouped by descending failure count.
    tbl_lst.sort(key=lambda row: row[2], reverse=True)
    tbl_sorted = list()
    for cnt in range(table["window"], -1, -1):
        tbl_sorted.extend([row for row in tbl_lst if row[1] == cnt])

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
825
826
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the csv produced by table_failed_tests and renders it as an
    HTML table embedded in a reStructuredText ".. raw:: html" block,
    linking each test name to its trending graph.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except IOError as err:
        # The input file may be missing if the csv-producing table failed;
        # warn and skip instead of crashing the whole report generation.
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    if not csv_lst:
        # An empty input file would otherwise crash on csv_lst[0] below.
        logging.warning("No data in the file '{0}'.".
                        format(table["input-file"]))
        return

    # Table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header (the first csv row):
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows: alternate two background colors for readability.
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name (first column) is a hyperlink to the trending graph:
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return