77243744f9997278d7136e0ac4c3a6006ae650c1
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
28
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30     convert_csv_to_pretty_txt
31
32
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
34
35
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        # Resolve the generator function by name via the module namespace
        # instead of eval() so that only functions defined in this module
        # can be invoked and a missing/misspelled algorithm name is
        # reported instead of raising.
        alg_name = table.get("algorithm", "")
        generator = globals().get(alg_name, None)
        if not callable(generator):
            logging.error("Probably algorithm '{alg}' is not defined.".
                          format(alg=alg_name))
            continue
        generator(table, data)
    logging.info("Done.")
53
54
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables; double any embedded quotes so the
    # titles survive CSV quoting.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification.  Only the first job and its first build are used
    # (Python 2: dict.keys() returns a list).
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.iteritems():
        # Generate data: one table per suite, one row per test whose
        # parent matches the suite name.
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # column["data"] is a space-separated spec
                        # (e.g. "data <key>"); the second token selects the
                        # field of the test record.
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # Drop the first " |br| " separator and wrap the
                            # text in |prein| / |preout| rST substitutions;
                            # [:-5] trims a trailing separator fragment
                            # (presumably the last " |br| " -- TODO confirm).
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        # The test record lacks this column's field.
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
121
122
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data: filter, then merge data from all jobs/builds into
    # a single data set.
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; double any embedded quotes so the
    # titles survive CSV quoting.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.iteritems():
        # Generate data: one table per suite, one row per test whose parent
        # matches the suite name.
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # column["data"] is a space-separated spec
                        # (e.g. "data <key>"); the second token selects the
                        # field of the test record.
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # Drop the first " |br| " separator and wrap the
                            # text in |prein| / |preout| rST substitutions.
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        # The test record lacks this column's field.
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
186
187
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        # Optional columns for historical releases precede the
        # reference/compare columns; two columns (mean, stdev) per item.
        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    # Reference data.  Test names are normalized (rate-type suffixes
    # dropped, thread-core tags like 2t1c mapped to core counts like 1c)
    # so that rows from different test variants match across data sets.
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name: frame size (first token of the parent
                    # suite name) plus the test name without its last token.
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["ref-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except TypeError:
                    pass  # No data in output.xml for this test

    # Compare data.  Tests with no matching reference entry raise KeyError
    # here and are skipped silently.
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                    replace("-ndrdisc", "").replace("-pdr", ""). \
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["cmp-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except KeyError:
                    pass
                except TypeError:
                    # Malformed compare data invalidates the whole row.
                    tbl_dict.pop(tst_name_mod, None)
    # Historical data (optional); only tests already collected above get
    # history columns.
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                            replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                            replace("-ndrdisc", "").replace("-pdr", ""). \
                            replace("-ndr", "").\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                        if "across topologies" in table["title"].lower():
                            tst_name_mod = tst_name_mod.replace("2n1l-", "")
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][item["title"
                                ]].append(tst_data["result"]["receive-rate"].
                                          avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "PDR"]["LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "NDR"]["LOWER"])
                            else:
                                continue
                        except (TypeError, KeyError):
                            pass

    # Build the rows: mean and stdev (converted to Mpps) per column pair.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # item[-4] is the reference mean, item[-2] the compare mean; the
        # delta is appended only when both are present and the reference is
        # non-zero.  Rows shorter than the header (no delta) are dropped.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
413
414
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Prefix the test name with the NIC type extracted from
                    # the parent suite name; skip tests with no NIC match.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        # At least two samples are needed for anomaly classification.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # The long-term slice is empty or all-NaN.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last trend value vs the value one window ago.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        # Long-term change: last trend value vs the long-window maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # NOTE(review): the first check is subsumed by the second one
            # (any single NaN already triggers the second 'continue').
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            if (isnan(last_avg) or
                isnan(rel_change_last) or
                isnan(rel_change_long)):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    # Pre-sort by name, then group rows by regression count (descending)
    # and progression count (descending), ordering each group by the
    # short-term change.
    tbl_lst.sort(key=lambda rel: rel[0])

    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
533
534
535 def _generate_url(base, testbed, test_name):
536     """Generate URL to a trending plot from the name of the test case.
537
538     :param base: The base part of URL common to all test cases.
539     :param testbed: The testbed used for testing.
540     :param test_name: The name of the test case.
541     :type base: str
542     :type testbed: str
543     :type test_name: str
544     :returns: The URL to the plot with the trending data for the given test
545         case.
546     :rtype str
547     """
548
549     url = base
550     file_name = ""
551     anchor = ".html#"
552     feature = ""
553
554     if "lbdpdk" in test_name or "lbvpp" in test_name:
555         file_name = "link_bonding"
556
557     elif "114b" in test_name and "vhost" in test_name:
558         file_name = "vts"
559
560     elif "testpmd" in test_name or "l3fwd" in test_name:
561         file_name = "dpdk"
562
563     elif "memif" in test_name:
564         file_name = "container_memif"
565         feature = "-base"
566
567     elif "srv6" in test_name:
568         file_name = "srv6"
569
570     elif "vhost" in test_name:
571         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
572             file_name = "vm_vhost_l2"
573             if "114b" in test_name:
574                 feature = ""
575             elif "l2xcbase" in test_name:
576                 feature = "-base-l2xc"
577             elif "l2bdbasemaclrn" in test_name:
578                 feature = "-base-l2bd"
579             else:
580                 feature = "-base"
581         elif "ip4base" in test_name:
582             file_name = "vm_vhost_ip4"
583             feature = "-base"
584
585     elif "ipsec" in test_name:
586         file_name = "ipsec"
587         feature = "-base-scale"
588
589     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
590         file_name = "ip4_tunnels"
591         feature = "-base"
592
593     elif "ip4base" in test_name or "ip4scale" in test_name:
594         file_name = "ip4"
595         if "xl710" in test_name:
596             feature = "-base-scale-features"
597         elif "iacl" in test_name:
598             feature = "-features-iacl"
599         elif "oacl" in test_name:
600             feature = "-features-oacl"
601         elif "snat" in test_name or "cop" in test_name:
602             feature = "-features"
603         else:
604             feature = "-base-scale"
605
606     elif "ip6base" in test_name or "ip6scale" in test_name:
607         file_name = "ip6"
608         feature = "-base-scale"
609
610     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
611             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
612             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
613         file_name = "l2"
614         if "macip" in test_name:
615             feature = "-features-macip"
616         elif "iacl" in test_name:
617             feature = "-features-iacl"
618         elif "oacl" in test_name:
619             feature = "-features-oacl"
620         else:
621             feature = "-base-scale"
622
623     if "x520" in test_name:
624         nic = "x520-"
625     elif "x710" in test_name:
626         nic = "x710-"
627     elif "xl710" in test_name:
628         nic = "xl710-"
629     elif "xxv710" in test_name:
630         nic = "xxv710-"
631     else:
632         nic = ""
633     anchor += nic
634
635     if "64b" in test_name:
636         framesize = "64b"
637     elif "78b" in test_name:
638         framesize = "78b"
639     elif "imix" in test_name:
640         framesize = "imix"
641     elif "9000b" in test_name:
642         framesize = "9000b"
643     elif "1518b" in test_name:
644         framesize = "1518b"
645     elif "114b" in test_name:
646         framesize = "114b"
647     else:
648         framesize = ""
649     anchor += framesize + '-'
650
651     if "1t1c" in test_name:
652         anchor += "1t1c"
653     elif "2t2c" in test_name:
654         anchor += "2t2c"
655     elif "4t4c" in test_name:
656         anchor += "4t4c"
657     elif "2t1c" in test_name:
658         anchor += "2t1c"
659     elif "4t2c" in test_name:
660         anchor += "4t2c"
661     elif "8t4c" in test_name:
662         anchor += "8t4c"
663
664     return url + file_name + '-' + testbed + '-' + nic + framesize + feature + \
665            anchor + feature
666
667
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # An empty input file has no header row; bail out instead of raising
    # IndexError on csv_lst[0] below.
    if not csv_lst:
        logging.warning("The input file '{0}' is empty.".
                        format(table["input-file"]))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header: first column left-aligned, the rest centered.
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows: colour by classification; the two shades alternate per row.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 hold the regression / progression counts.
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        else:
            color = "normal"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name: the first column links to the trending plot for the test.
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
744
745
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Produces a CSV file listing, for each test that failed within the
    configured time window, the number of failures and the details of the
    most recent one, plus a pretty-printed TXT rendering of the same table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification.  Only builds generated inside the sliding time window
    # (default: last 7 days) are taken into account.
    ref_time = dt.utcnow()
    window = timedelta(int(table.get("window", 7)))

    tests = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tests.get(tst_name, None) is None:
                    # First sighting of this test: derive the display name
                    # from the NIC (parsed out of the suite name) and the
                    # test name; tests without a recognizable NIC are
                    # skipped entirely.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tests[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    generated = input_data.metadata(job, build).\
                        get("generated", "")
                    if not generated:
                        continue
                    then = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (ref_time - then) <= window:
                        tests[tst_name]["data"][build] = (
                            tst_data["status"],
                            generated,
                            input_data.metadata(job, build).get("version", ""),
                            build)
                except (TypeError, KeyError) as err:
                    logging.warning("tst_name: {} - err: {}".
                                    format(tst_name, repr(err)))

    # Count the failures per test; remember the details of the latest one
    # (build entries are iterated in insertion order, so the last FAIL wins).
    max_fails = 0
    rows = list()
    for test in tests.values():
        fails_nr = 0
        last_date = last_vpp = last_csit = None
        for status, generated, version, build in test["data"].values():
            if status == "FAIL":
                fails_nr += 1
                last_date = generated
                last_vpp = version
                last_csit = build
        if fails_nr:
            if fails_nr > max_fails:
                max_fails = fails_nr
            rows.append([test["name"],
                         fails_nr,
                         last_date,
                         last_vpp,
                         "mrr-daily-build-{0}".format(last_csit)])

    # Order primarily by failure count (descending), secondarily by the
    # last-failure timestamp (descending; the "YYYYMMDD HH:MM" format sorts
    # correctly as a string).
    rows.sort(key=lambda itm: itm[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_sorted.extend([itm for itm in rows if itm[1] == nrf])
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for row in tbl_sorted:
            file_handler.write(",".join([str(itm) for itm in row]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
843
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV produced by table_failed_tests and renders it as an HTML
    table (wrapped in an rST ".. raw:: html" directive), with the test name
    in the first column linked to the trending page for the given testbed.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Guard against an empty input file: with no header row there is nothing
    # to render and "csv_lst[0]" below would raise an uncaught IndexError.
    if not csv_lst:
        logging.warning("The input file '{0}' is empty.".
                        format(table["input-file"]))
        return

    # Table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header (first row of the CSV):
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows, with alternating background colors:
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name column: link the test name to its trending graph.
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return