CSIT-1340: Fix the list of failed tests in Trending
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
28
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30     convert_csv_to_pretty_txt
31
32
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
34
35
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table entry names its generator algorithm; the function of that
    name is looked up and invoked with the table specification and the
    input data. Tables whose algorithm cannot be resolved are logged and
    skipped.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table_spec in spec.tables:
        algorithm = table_spec["algorithm"]
        try:
            # NOTE(review): the algorithm name comes from the specification
            # file and is evaluated as code; specifications must be trusted.
            eval(algorithm)(table_spec, data)
        except NameError as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=algorithm, err=repr(err)))
    logging.info("Done.")
53
54
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    One csv file is written per suite that contains at least one test.
    Only the first build of the first job in the table specification is
    used as the data source.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables; double any quotes so the titles are
    # valid csv fields.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification. list(...) keeps this working on both Python 2 and 3
    # (dict.keys()[0] fails on Python 3).
    job = list(table["data"])[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.items():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # The column model is e.g. "data name"; the second
                        # word is the key into the test data.
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # Drop the first line break and wrap the rest in
                            # a preformatted block.
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
121
122
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    The filtered data from all builds is merged before the tables are
    generated; one csv file is written per suite that contains at least
    one test.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; double any quotes so the titles are
    # valid csv fields.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.items():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # The column model is e.g. "data name"; the second
                        # word is the key into the test data.
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # Drop the first line break and wrap the rest in
                            # a preformatted block.
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
186
187
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    The table compares the mean throughput (receive rate for MRR tests) of
    the "reference" builds against the "compare" builds, optionally prefixed
    with columns for each entry in "history". The result is written as csv
    and as a pretty txt table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            # .items() works on both Python 2 and 3 (iteritems() does not).
            for tst_name, tst_data in data[job][str(build)].items():
                # Normalize the test name so the same test matches across
                # rate-search suffixes and thread/core naming schemes.
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["ref-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                    replace("-ndrdisc", "").replace("-pdr", ""). \
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["cmp-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except KeyError:
                    pass
                except TypeError:
                    # Malformed comparison data invalidates the whole row.
                    tbl_dict.pop(tst_name_mod, None)
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].items():
                        tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                            replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                            replace("-ndrdisc", "").replace("-pdr", ""). \
                            replace("-ndr", "").\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                        if "across topologies" in table["title"].lower():
                            tst_name_mod = tst_name_mod.replace("2n1l-", "")
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][item["title"
                                ]].append(tst_data["result"]["receive-rate"].
                                          avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "PDR"]["LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "NDR"]["LOWER"])
                            else:
                                continue
                        except (TypeError, KeyError):
                            pass

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # Delta is computed only when both means are present; rows with an
        # incomplete column set are dropped by the length check below.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
413
414
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    For every test the trend value, the short- and long-term relative
    changes, and the numbers of regressions and progressions within the
    trend window are reported.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            # .items() works on both Python 2 and 3 (iteritems() does not).
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # The NIC name is taken from the suite (parent) name.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        # At least two samples are needed to classify a trend.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # The long-term slice holds no valid samples.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 '-' if isnan(last_avg) else
                 round(last_avg / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order by regressions (primary, descending) and progressions
    # (secondary, descending); ties are ordered by short-term change.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
530
531
532 def _generate_url(base, testbed, test_name):
533     """Generate URL to a trending plot from the name of the test case.
534
535     :param base: The base part of URL common to all test cases.
536     :param testbed: The testbed used for testing.
537     :param test_name: The name of the test case.
538     :type base: str
539     :type testbed: str
540     :type test_name: str
541     :returns: The URL to the plot with the trending data for the given test
542         case.
543     :rtype str
544     """
545
546     url = base
547     file_name = ""
548     anchor = ".html#"
549     feature = ""
550
551     if "lbdpdk" in test_name or "lbvpp" in test_name:
552         file_name = "link_bonding"
553
554     elif "114b" in test_name and "vhost" in test_name:
555         file_name = "vts"
556
557     elif "testpmd" in test_name or "l3fwd" in test_name:
558         file_name = "dpdk"
559
560     elif "memif" in test_name:
561         file_name = "container_memif"
562         feature = "-base"
563
564     elif "srv6" in test_name:
565         file_name = "srv6"
566
567     elif "vhost" in test_name:
568         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
569             file_name = "vm_vhost_l2"
570             if "114b" in test_name:
571                 feature = ""
572             elif "l2xcbase" in test_name:
573                 feature = "-base-l2xc"
574             elif "l2bdbasemaclrn" in test_name:
575                 feature = "-base-l2bd"
576             else:
577                 feature = "-base"
578         elif "ip4base" in test_name:
579             file_name = "vm_vhost_ip4"
580             feature = "-base"
581
582     elif "ipsec" in test_name:
583         file_name = "ipsec"
584         feature = "-base-scale"
585
586     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
587         file_name = "ip4_tunnels"
588         feature = "-base"
589
590     elif "ip4base" in test_name or "ip4scale" in test_name:
591         file_name = "ip4"
592         if "xl710" in test_name:
593             feature = "-base-scale-features"
594         elif "iacl" in test_name:
595             feature = "-features-iacl"
596         elif "oacl" in test_name:
597             feature = "-features-oacl"
598         elif "snat" in test_name or "cop" in test_name:
599             feature = "-features"
600         else:
601             feature = "-base-scale"
602
603     elif "ip6base" in test_name or "ip6scale" in test_name:
604         file_name = "ip6"
605         feature = "-base-scale"
606
607     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
608             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
609             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
610         file_name = "l2"
611         if "macip" in test_name:
612             feature = "-features-macip"
613         elif "iacl" in test_name:
614             feature = "-features-iacl"
615         elif "oacl" in test_name:
616             feature = "-features-oacl"
617         else:
618             feature = "-base-scale"
619
620     if "x520" in test_name:
621         nic = "x520-"
622     elif "x710" in test_name:
623         nic = "x710-"
624     elif "xl710" in test_name:
625         nic = "xl710-"
626     elif "xxv710" in test_name:
627         nic = "xxv710-"
628     else:
629         nic = ""
630     anchor += nic
631
632     if "64b" in test_name:
633         framesize = "64b"
634     elif "78b" in test_name:
635         framesize = "78b"
636     elif "imix" in test_name:
637         framesize = "imix"
638     elif "9000b" in test_name:
639         framesize = "9000b"
640     elif "1518b" in test_name:
641         framesize = "1518b"
642     elif "114b" in test_name:
643         framesize = "114b"
644     else:
645         framesize = ""
646     anchor += framesize + '-'
647
648     if "1t1c" in test_name:
649         anchor += "1t1c"
650     elif "2t2c" in test_name:
651         anchor += "2t2c"
652     elif "4t4c" in test_name:
653         anchor += "4t4c"
654     elif "2t1c" in test_name:
655         anchor += "2t1c"
656     elif "4t2c" in test_name:
657         anchor += "4t2c"
658     elif "8t4c" in test_name:
659         anchor += "8t4c"
660
661     return url + file_name + '-' + testbed + '-' + nic + framesize + feature + \
662            anchor + feature
663
664
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the csv dashboard produced earlier ("input-file") and renders it
    as an HTML table with per-row coloring and a trending-plot link on the
    test name, written as an rST "raw html" block to "output-file".

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    # The testbed name is required to build the plot URLs.
    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        # NOTE(review): binary mode matches the Python 2 csv module; under
        # Python 3 this would need text mode with newline="" — confirm the
        # target interpreter before changing.
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header: first csv row; first column left-aligned, rest centered.
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows: color by classification; alternating shades for odd/even rows.
    # Columns 4 and 5 of the csv hold the regression and progression counts.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        else:
            color = "normal"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name: the first column links to the trending plot.
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            # NOTE(review): ET.tostring() returns str under Python 2; under
            # Python 3 it returns bytes and this write would fail — confirm
            # the target interpreter before changing.
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
741
742
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Produces a CSV file listing tests which failed at least once within the
    configured time window, together with a pretty-printed txt version.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification

    now = dt.utcnow()
    # Read the window size once, with its default. The original code indexed
    # table["window"] a second time below which raised KeyError when "window"
    # was not specified, although a default of 7 days is accepted here.
    window = int(table.get("window", 7))
    timeperiod = timedelta(window)

    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # The NIC name taken from the parent suite is prepended
                    # to the test name; tests without a NIC are skipped.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    generated = input_data.metadata(job, build).\
                        get("generated", "")
                    if not generated:
                        continue
                    then = dt.strptime(generated, "%Y%m%d %H:%M")
                    # Keep only results generated within the time window:
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name]["data"][build] = (
                            tst_data["status"],
                            generated,
                            input_data.metadata(job, build).get("version", ""),
                            build)
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        # Initialised defensively; overwritten by the most recent FAIL.
        fails_last_date = ""
        fails_last_vpp = ""
        fails_last_csit = ""
        for val in tst_data["data"].values():
            if val[0] == "FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    # Sort newest failures first, then stable-group by the number of failures
    # in descending order so the most frequently failing tests come first.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    # With more than one build per day a test can fail more times than there
    # are days in the window; start the grouping at the maximum observed
    # failure count so such tests are not silently dropped.
    max_fails = max([item[1] for item in tbl_lst]) if tbl_lst else window
    tbl_sorted = list()
    for nrf in range(max(window, max_fails), -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
836
837
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV produced by table_failed_tests and renders it as an HTML
    table embedded in a reStructuredText "raw" directive, with the test
    names linked to their trending graphs.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the pre-generated CSV content:
    try:
        with open(table["input-file"], 'rb') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Root element of the generated table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row; the first column is left-aligned, the rest centered:
    hdr_row = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for col_nr, label in enumerate(rows[0]):
        cell = ET.SubElement(
            hdr_row, "th",
            attrib=dict(align="left" if col_nr == 0 else "center"))
        cell.text = label

    # Data rows, with alternating background colors:
    backgrounds = ("#e9f1fb", "#d4e4f7")
    for row_nr, row in enumerate(rows[1:]):
        data_row = ET.SubElement(
            failed_tests, "tr",
            attrib=dict(bgcolor=backgrounds[row_nr % 2]))
        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                data_row, "td",
                attrib=dict(align="left" if col_nr == 0 else "center"))
            if col_nr == 0:
                # The first column holds the test name linked to its
                # trending graph:
                url = _generate_url("../trending/", testbed, value)
                link = ET.SubElement(cell, "a", attrib=dict(href=url))
                link.text = value
            else:
                cell.text = value

    # Emit the table wrapped in a rST raw-html directive:
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return