CSIT-1402: Add VICs to trending
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
28
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30     convert_csv_to_pretty_txt
31
32
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
34
35
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Dispatches each table in the specification to the generator function
    whose name is stored in the table's "algorithm" field.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        try:
            # NOTE(review): eval() is used to look up the generator function
            # by name from the specification. The spec file is operator-
            # supplied, but eval on it is still risky; a globals() or
            # dispatch-dict lookup would be safer. Flagged, not changed.
            eval(table["algorithm"])(table, data)
        except NameError as err:
            # An unknown algorithm name surfaces as NameError from eval.
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
    logging.info("Done.")
53
54
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    One CSV file is written per suite that has matching test data.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables; inner double quotes are doubled
    # so the values stay valid CSV.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.iteritems():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # Use the str method instead of the deprecated
                            # string.replace() function; drop only the
                            # first " |br| " marker.
                            col_data = col_data.replace(" |br| ", "", 1)
                            # Wrap in |prein|/|preout|; the last 5 chars
                            # are cut - presumably a trailing " |br|"
                            # fragment. TODO confirm against the data.
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        # Column not present for this test.
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
121
122
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Like table_details, but the data from all builds is merged first.
    One CSV file is written per suite that has matching test data.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; inner double quotes are doubled
    # so the values stay valid CSV.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.iteritems():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # Use the str method instead of the deprecated
                            # string.replace() function; drop only the
                            # first " |br| " marker.
                            col_data = col_data.replace(" |br| ", "", 1)
                            # Wrap in |prein|/|preout|; the last 5 chars
                            # are cut - presumably a trailing " |br|"
                            # fragment. TODO confirm against the data.
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        # Column not present for this test.
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
186
187
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Builds a CSV (and pretty TXT) table comparing the reference and compare
    data sets (optionally with extra historic columns), sorted by the
    relative change between reference and compare.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    def _normalize_test_name(tst_name):
        # Strip the test-type suffix and unify the thread/core part of the
        # name so the same test from different runs maps onto one key.
        name = tst_name.replace("-ndrpdrdisc", "").\
            replace("-ndrpdr", "").replace("-pdrdisc", "").\
            replace("-ndrdisc", "").replace("-pdr", "").\
            replace("-ndr", "").\
            replace("1t1c", "1c").replace("2t1c", "1c").\
            replace("2t2c", "2c").replace("4t2c", "2c").\
            replace("4t4c", "4c").replace("8t4c", "4c")
        if "across topologies" in table["title"].lower():
            name = name.replace("2n1l-", "")
        return name

    def _get_rate(tst_data):
        # Extract the measured rate according to table["include-tests"].
        # Returns None when the test type does not match; may raise
        # TypeError/KeyError on malformed data (handled by the callers
        # exactly as the per-loop handlers did before).
        # TODO: Re-work when NDRPDRDISC tests are not used
        if table["include-tests"] == "MRR":
            return tst_data["result"]["receive-rate"].avg
        elif table["include-tests"] == "PDR":
            if tst_data["type"] == "PDR":
                return tst_data["throughput"]["value"]
            elif tst_data["type"] == "NDRPDR":
                return tst_data["throughput"]["PDR"]["LOWER"]
        elif table["include-tests"] == "NDR":
            if tst_data["type"] == "NDR":
                return tst_data["throughput"]["value"]
            elif tst_data["type"] == "NDRPDR":
                return tst_data["throughput"]["NDR"]["LOWER"]
        return None

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = _normalize_test_name(tst_name)
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    rate = _get_rate(tst_data)
                except TypeError:
                    rate = None  # No data in output.xml for this test
                if rate is not None:
                    tbl_dict[tst_name_mod]["ref-data"].append(rate)

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = _normalize_test_name(tst_name)
                try:
                    rate = _get_rate(tst_data)
                except KeyError:
                    rate = None
                except TypeError:
                    # Malformed data: drop the whole test from the table.
                    tbl_dict.pop(tst_name_mod, None)
                    rate = None
                # The test may exist only in the compare set; skip it then.
                if rate is not None and tst_name_mod in tbl_dict:
                    tbl_dict[tst_name_mod]["cmp-data"].append(rate)

    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        tst_name_mod = _normalize_test_name(tst_name)
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].\
                                get(item["title"], None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            rate = _get_rate(tst_data)
                        except (TypeError, KeyError):
                            rate = None
                        if rate is not None:
                            tbl_dict[tst_name_mod]["history"][item["title"]].\
                                append(rate)

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        # Values are in pps; the table shows Mpps.
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # item[-4] is the reference mean, item[-2] the compare mean.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Rows without a computed delta are shorter than the header and
        # are intentionally dropped.
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
413
414
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    Classifies the trend of each test, computes the short- and long-term
    relative changes and writes a CSV plus a pretty TXT dashboard.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Parse the NIC out of the parent suite name and prepend
                    # it so the same test on different NICs does not collide.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        if len(data_t) < 2:
            # A trend cannot be derived from a single sample.
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        # Best (maximal) trend average in the long window that precedes
        # the short window; nan if that slice has no valid values.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip the test if any displayed value is NaN. This single
            # check subsumes the former separate "both rel changes NaN"
            # check, which was dead code.
            if (isnan(last_avg) or
                    isnan(rel_change_last) or
                    isnan(rel_change_long)):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order the rows: most regressions first, then most progressions,
    # ties broken by the short-term change.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
533
534
535 def _generate_url(base, testbed, test_name):
536     """Generate URL to a trending plot from the name of the test case.
537
538     :param base: The base part of URL common to all test cases.
539     :param testbed: The testbed used for testing.
540     :param test_name: The name of the test case.
541     :type base: str
542     :type testbed: str
543     :type test_name: str
544     :returns: The URL to the plot with the trending data for the given test
545         case.
546     :rtype str
547     """
548
549     url = base
550     file_name = ""
551     anchor = ".html#"
552     feature = ""
553
554     if "lbdpdk" in test_name or "lbvpp" in test_name:
555         file_name = "link_bonding"
556
557     elif "114b" in test_name and "vhost" in test_name:
558         file_name = "vts"
559
560     elif "testpmd" in test_name or "l3fwd" in test_name:
561         file_name = "dpdk"
562
563     elif "memif" in test_name:
564         file_name = "container_memif"
565         feature = "-base"
566
567     elif "srv6" in test_name:
568         file_name = "srv6"
569
570     elif "vhost" in test_name:
571         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
572             file_name = "vm_vhost_l2"
573             if "114b" in test_name:
574                 feature = ""
575             elif "l2xcbase" in test_name:
576                 feature = "-base-l2xc"
577             elif "l2bdbasemaclrn" in test_name:
578                 feature = "-base-l2bd"
579             else:
580                 feature = "-base"
581         elif "ip4base" in test_name:
582             file_name = "vm_vhost_ip4"
583             feature = "-base"
584
585     elif "ipsec" in test_name:
586         file_name = "ipsec"
587         feature = "-base-scale"
588
589     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
590         file_name = "ip4_tunnels"
591         feature = "-base"
592
593     elif "ip4base" in test_name or "ip4scale" in test_name:
594         file_name = "ip4"
595         if "xl710" in test_name:
596             feature = "-base-scale-features"
597         elif "iacl" in test_name:
598             feature = "-features-iacl"
599         elif "oacl" in test_name:
600             feature = "-features-oacl"
601         elif "snat" in test_name or "cop" in test_name:
602             feature = "-features"
603         else:
604             feature = "-base-scale"
605
606     elif "ip6base" in test_name or "ip6scale" in test_name:
607         file_name = "ip6"
608         feature = "-base-scale"
609
610     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
611             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
612             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
613         file_name = "l2"
614         if "macip" in test_name:
615             feature = "-features-macip"
616         elif "iacl" in test_name:
617             feature = "-features-iacl"
618         elif "oacl" in test_name:
619             feature = "-features-oacl"
620         else:
621             feature = "-base-scale"
622
623     if "x520" in test_name:
624         nic = "x520-"
625     elif "x710" in test_name:
626         nic = "x710-"
627     elif "xl710" in test_name:
628         nic = "xl710-"
629     elif "xxv710" in test_name:
630         nic = "xxv710-"
631     elif "vic1227" in test_name:
632         nic = "vic1227-"
633     elif "vic1385" in test_name:
634         nic = "vic1385-"
635     else:
636         nic = ""
637     anchor += nic
638
639     if "64b" in test_name:
640         framesize = "64b"
641     elif "78b" in test_name:
642         framesize = "78b"
643     elif "imix" in test_name:
644         framesize = "imix"
645     elif "9000b" in test_name:
646         framesize = "9000b"
647     elif "1518b" in test_name:
648         framesize = "1518b"
649     elif "114b" in test_name:
650         framesize = "114b"
651     else:
652         framesize = ""
653     anchor += framesize + '-'
654
655     if "1t1c" in test_name:
656         anchor += "1t1c"
657     elif "2t2c" in test_name:
658         anchor += "2t2c"
659     elif "4t4c" in test_name:
660         anchor += "4t4c"
661     elif "2t1c" in test_name:
662         anchor += "2t1c"
663     elif "4t2c" in test_name:
664         anchor += "4t2c"
665     elif "8t4c" in test_name:
666         anchor += "8t4c"
667
668     return url + file_name + '-' + testbed + '-' + nic + framesize + feature + \
669            anchor + feature
670
671
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the CSV dashboard produced earlier and renders it as an HTML
    table embedded in an rST "raw" directive, with each test name linked
    to its trending plot.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    # The testbed name is needed to build the plot URLs; bail out early.
    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header: first CSV row; first column left-aligned, rest centered.
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows: two shades per category, alternated by row index for striping.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 hold the regression / progression counts
        # written by table_performance_trending_dashboard.
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        else:
            color = "normal"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name: link the first column to the trending plot.
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
748
749
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    The table lists, for each test that failed at least once inside the
    configured time window, the number of failures and the time / builds of
    the most recent failure. Output is written as CSV and as pretty text.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification

    now = dt.utcnow()
    # Only results not older than this many days (default 7) are counted.
    timeperiod = timedelta(int(table.get("window", 7)))

    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            # Use items() (not iteritems()) for consistency with the
            # iteration over table["data"] above and for Python 3
            # compatibility.
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Derive the NIC name from the parent suite name; tests
                    # without a recognizable NIC are skipped.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    generated = input_data.metadata(job, build).\
                        get("generated", "")
                    if not generated:
                        continue
                    then = dt.strptime(generated, "%Y%m%d %H:%M")
                    # Keep only results generated within the time window.
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name]["data"][build] = (
                            tst_data["status"],
                            generated,
                            input_data.metadata(job, build).get("version", ""),
                            build)
                except (TypeError, KeyError) as err:
                    logging.warning("tst_name: {} - err: {}".
                                    format(tst_name, repr(err)))

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        # Initialized per test so stale values from a previous iteration can
        # never leak in; they are read only when fails_nr > 0, i.e. after
        # they have been assigned in the loop below.
        fails_last_date = fails_last_vpp = fails_last_csit = None
        for val in tst_data["data"].values():
            if val[0] == "FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = max(max_fails, fails_nr)
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    # Order: primarily by failure count (descending), secondarily by the date
    # of the last failure (newest first). The "%Y%m%d %H:%M" format sorts
    # correctly as a plain string, and the bucket filtering below is stable,
    # so the date ordering survives the grouping.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
847
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the whole input CSV into memory; bail out if the file is not
    # specified or cannot be parsed.
    try:
        with open(table["input-file"], 'rb') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Root element of the generated HTML table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row; the first column is left-aligned, the rest centered:
    header_row = ET.SubElement(
        failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for col, title in enumerate(rows[0]):
        cell = ET.SubElement(
            header_row, "th",
            attrib=dict(align="left" if col == 0 else "center"))
        cell.text = title

    # Data rows with alternating background colors:
    backgrounds = ("#e9f1fb", "#d4e4f7")
    for row_nr, row in enumerate(rows[1:]):
        data_row = ET.SubElement(
            failed_tests, "tr",
            attrib=dict(bgcolor=backgrounds[row_nr % 2]))
        for col, value in enumerate(row):
            cell = ET.SubElement(
                data_row, "td",
                attrib=dict(align="left" if col == 0 else "center"))
            if col == 0:
                # The first column holds the test name; turn it into a link
                # to the corresponding trending graph.
                link = ET.SubElement(
                    cell, "a",
                    attrib=dict(
                        href=_generate_url("../trending/", testbed, value)))
                link.text = value
            else:
                cell.text = value

    # Emit the table as raw HTML embedded in an rST file:
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return