CSIT-1500: Add comparison table for SOAK vs NDRPDR
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
28
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30     convert_csv_to_pretty_txt
31
32
# Matches a NIC token embedded in a suite/parent name, e.g. "10ge2p1x520":
# optional digits, "ge", one digit, "p", one digit, non-digits, optional
# trailing digits.  Used below to extract the NIC from a test's parent
# (re.search) and to strip it out of test names (re.sub).
REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
34
35
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table entry names the generator function to run via its
    "algorithm" key; the name is resolved in this module's namespace.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        alg = table["algorithm"]
        try:
            # NOTE: eval() is acceptable here only because the algorithm
            # name comes from the project's own specification file, not
            # from untrusted input.
            eval(alg)(table, data)
        except NameError as err:
            # A misspelled / missing algorithm aborts this table only;
            # the remaining tables are still generated.
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=alg, err=repr(err)))
    logging.info("Done.")
53
54
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    Writes one CSV file per suite, named
    "<output-file>_<suite name><output-file-ext>", with one quoted column
    per entry in the table's "columns" model.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables.  Titles are CSV-quoted, with
    # embedded double quotes escaped by doubling.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification
    # Only the first job and its first build serve as the data source.
    # NOTE: dict.keys()[0] works in Python 2 only (keys() is a view in 3).
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.iteritems():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            # NOTE(review): containment direction is "parent in suite_name"
            # (test's parent must be a substring of the suite name) --
            # confirm this is intended and not the reverse.
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # column["data"] is "<source> <field>"; the second
                        # token selects the field of the test record.
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("conf-history",
                                                            "show-run"):
                            # Drop the first " |br| " separator ("replace"
                            # is the Python 2 string-module function), then
                            # wrap in |prein|/|preout| rST markers.  The
                            # [:-5] trims the trailing 5 characters --
                            # presumably a dangling " |br|"; confirm.
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        # Field missing for this test.
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
122
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Works like table_details but on data merged across builds; writes one
    CSV file per merged suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # CSV-quoted column titles; embedded double quotes are doubled.
    header = ['"{0}"'.format(str(col["title"]).replace('"', '""'))
              for col in table["columns"]]

    for _, suite in suites.iteritems():
        suite_name = suite["name"]
        rows = list()
        for tst_name in data.keys():
            # Only tests whose parent is part of this suite's name.
            if data[tst_name]["parent"] not in suite_name:
                continue
            row = list()
            for col in table["columns"]:
                try:
                    # The second token of col["data"] selects the field.
                    value = str(data[tst_name][col["data"].
                                split(" ")[1]]).replace('"', '""')
                    value = value.replace("No Data", "Not Captured     ")
                    if col["data"].split(" ")[1] in ("conf-history",
                                                     "show-run"):
                        # Drop the first " |br| " separator and wrap the
                        # remainder in |prein|/|preout| rST markers.
                        value = value.replace(" |br| ", "", 1)
                        value = " |prein| {0} |preout| ".format(value[:-5])
                    row.append('"{0}"'.format(value))
                except KeyError:
                    row.append('"Not captured"')
            rows.append(row)

        # Write the collected rows, if any, to the per-suite file.
        if rows:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for row in rows:
                    file_handler.write(",".join(row) + "\n")

    logging.info("  Done.")
188
189
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    One row per test: mean and stdev [Mpps] for optional "history" runs,
    the "reference" runs and the "compare" runs, plus the relative change
    between reference and compare.  Output is a CSV plus a pretty-txt
    rendering of it.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        # MRR tests report a receive rate; everything else a throughput.
        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Normalise the test name: strip the test-type suffix and
                # collapse thread/core tags (e.g. "4t4c" -> "4c") so the
                # same test from different runs shares one dictionary key.
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # First occurrence: displayed name = NIC token from the
                    # parent suite + test name minus its last "-" component.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["ref-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Same normalisation as in the reference loop above.
                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                    replace("-ndrdisc", "").replace("-pdr", ""). \
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["cmp-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except KeyError:
                    # Test absent from the reference set -> no tbl_dict
                    # entry to extend; skip silently.
                    pass
                except TypeError:
                    # Malformed compare data invalidates the whole row.
                    tbl_dict.pop(tst_name_mod, None)
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                            replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                            replace("-ndrdisc", "").replace("-pdr", ""). \
                            replace("-ndr", "").\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                        if "across topologies" in table["title"].lower():
                            tst_name_mod = tst_name_mod.replace("2n1l-", "")
                        # Only tests already present via the reference set
                        # get history columns.
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][item["title"
                                ]].append(tst_data["result"]["receive-rate"].
                                          avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "PDR"]["LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "NDR"]["LOWER"])
                            else:
                                continue
                        except (TypeError, KeyError):
                            pass

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                # No history at all: pad with one pair of empty cells.
                # NOTE(review): with more than one history column this pads
                # fewer cells than the header expects, so the row is later
                # dropped by the len(item) == len(header) check -- confirm
                # that is the intended behaviour.
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # item[-4] / item[-2] are the reference / compare means.  Skip the
        # delta when either is missing or the reference mean is zero
        # (presumably the divisor inside relative_change -- confirm).
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Rows with any missing value (no delta appended) are dropped here.
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
416
417
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Compares the same tests run on two different NICs ("reference" vs
    "compare", selected by tag) and reports mean, stdev and relative
    change per test.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Build the CSV header; bail out if the table model is incomplete.
    try:
        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"
        header = [
            "Test case",
            "{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
            "{0} Stdev [Mpps]".format(table["reference"]["title"]),
            "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
            "{0} Stdev [Mpps]".format(table["compare"]["title"]),
            "Delta [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Substring pairs normalised away from test names so the same test on
    # both NICs maps onto a single row key (order matters).
    replacements = (
        ("-ndrpdrdisc", ""), ("-ndrpdr", ""), ("-pdrdisc", ""),
        ("-ndrdisc", ""), ("-pdr", ""), ("-ndr", ""),
        ("1t1c", "1c"), ("2t1c", "1c"),
        ("2t2c", "2c"), ("4t2c", "2c"),
        ("4t4c", "4c"), ("8t4c", "4c"))

    # Collect per-test samples, split into reference/compare by NIC tag.
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name
                for old, new in replacements:
                    tst_name_mod = tst_name_mod.replace(old, new)
                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                if tst_name_mod not in tbl_dict:
                    tbl_dict[tst_name_mod] = {
                        "name": "-".join(tst_data["name"].split("-")[:-1]),
                        "ref-data": list(),
                        "cmp-data": list()}
                try:
                    if table["include-tests"] == "MRR":
                        result = tst_data["result"]["receive-rate"].avg
                    elif table["include-tests"] == "PDR":
                        result = tst_data["throughput"]["PDR"]["LOWER"]
                    elif table["include-tests"] == "NDR":
                        result = tst_data["throughput"]["NDR"]["LOWER"]
                    else:
                        result = None

                    if result:
                        if table["reference"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["ref-data"].append(result)
                        elif table["compare"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug("No data for {0}".format(tst_name))
                    logging.debug(repr(err))
                    # No data in output.xml for this test

    # Reduce samples to rows: name, ref mean/stdev, cmp mean/stdev, delta.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        for samples in (tbl_dict[tst_name]["ref-data"],
                        tbl_dict[tst_name]["cmp-data"]):
            if samples:
                item.append(round(mean(samples) / 1000000, 2))
                item.append(round(stdev(samples) / 1000000, 2))
            else:
                item.extend([None, None])
        # Delta only when both means exist and the reference mean is not 0.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Rows lacking any value are silently dropped.
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
526
527
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Pairs SOAK test results ("compare") with the corresponding NDR/PDR/MRR
    results ("reference") of the same test and reports mean, stdev and the
    relative change per test.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Build the CSV header; bail out if the table model is incomplete.
    try:
        ref_title = table["reference"]["title"]
        cmp_title = table["compare"]["title"]
        header = [
            "Test case",
            "{0} Throughput [Mpps]".format(ref_title),
            "{0} Stdev [Mpps]".format(ref_title),
            "{0} Throughput [Mpps]".format(cmp_title),
            "{0} Stdev [Mpps]".format(cmp_title),
            "Delta [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_data["type"] != "SOAK":
                    continue
                tst_name_mod = tst_name.replace("-soak", "")
                if tst_name_mod not in tbl_dict:
                    tbl_dict[tst_name_mod] = {
                        "name": tst_name_mod,
                        "ref-data": list(),
                        "cmp-data": list()
                    }
                try:
                    tbl_dict[tst_name_mod]["cmp-data"].append(
                        tst_data["throughput"]["LOWER"])
                except (KeyError, TypeError):
                    # SOAK run without a usable throughput value.
                    pass

    # Add corresponding NDR test results:
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdr", "").\
                    replace("-mrr", "")
                # Only tests which have a SOAK counterpart are of interest.
                if tst_name_mod not in tbl_dict:
                    continue
                try:
                    if tst_data["type"] not in ("NDRPDR", "MRR", "BMRR"):
                        continue
                    if table["include-tests"] == "MRR":
                        result = tst_data["result"]["receive-rate"].avg
                    elif table["include-tests"] == "PDR":
                        result = tst_data["throughput"]["PDR"]["LOWER"]
                    elif table["include-tests"] == "NDR":
                        result = tst_data["throughput"]["NDR"]["LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod]["ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    # Reduce samples to rows: name, ref mean/stdev, cmp mean/stdev, delta.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        for samples in (tbl_dict[tst_name]["ref-data"],
                        tbl_dict[tst_name]["cmp-data"]):
            if samples:
                item.append(round(mean(samples) / 1000000, 2))
                item.append(round(stdev(samples) / 1000000, 2))
            else:
                item.extend([None, None])
        # Delta only when both means exist and the reference mean is not 0.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Rows lacking any value are silently dropped.
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
635
636
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    For every test case the table shows the latest trend value [Mpps], the
    short-term and long-term relative changes of the trend and the number of
    regressions / progressions detected within the short-term window. The
    result is written as a csv file and converted to a pretty txt table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    # tbl_dict maps test name --> {"name": "<nic>-<test name>",
    #                              "data": OrderedDict(build --> receive rate)}
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # The NIC name is extracted from the suite (parent) name;
                    # tests whose parent does not match REGEX_NIC are skipped.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        # At least two samples are needed to evaluate a trend.
        if len(data_t) < 2:
            continue

        # classify_anomalies (resources/tools/presentation/utils.py) returns
        # a per-sample classification list and trend averages.
        classification_lst, avgs = classify_anomalies(data_t)

        # Clamp both windows to the number of available samples.
        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        try:
            # The maximum trend average over the long-term part of the
            # history, i.e. excluding the most recent short-term window.
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # All values NaN or the long-term slice is empty.
            max_long_avg = nan
        last_avg = avgs[-1]
        # Trend average one short-term window ago (clamped to history start).
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: latest trend vs. trend one window ago [%].
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        # Long-term change: latest trend vs. long-term maximum [%].
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Rows without any computable change are not listed.
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            if (isnan(last_avg) or
                isnan(rel_change_last) or
                isnan(rel_change_long)):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 round(last_avg / 1000000, 2),  # pps --> Mpps
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    # Pre-sort alphabetically by test name so equal-count groups below keep a
    # stable order.
    tbl_lst.sort(key=lambda rel: rel[0])

    # Final order: descending number of regressions, then descending number
    # of progressions, then ascending short-term change within each group.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
755
756
757 def _generate_url(base, testbed, test_name):
758     """Generate URL to a trending plot from the name of the test case.
759
760     :param base: The base part of URL common to all test cases.
761     :param testbed: The testbed used for testing.
762     :param test_name: The name of the test case.
763     :type base: str
764     :type testbed: str
765     :type test_name: str
766     :returns: The URL to the plot with the trending data for the given test
767         case.
768     :rtype str
769     """
770
771     url = base
772     file_name = ""
773     anchor = ".html#"
774     feature = ""
775
776     if "lbdpdk" in test_name or "lbvpp" in test_name:
777         file_name = "link_bonding"
778
779     elif "114b" in test_name and "vhost" in test_name:
780         file_name = "vts"
781
782     elif "testpmd" in test_name or "l3fwd" in test_name:
783         file_name = "dpdk"
784
785     elif "memif" in test_name:
786         file_name = "container_memif"
787         feature = "-base"
788
789     elif "srv6" in test_name:
790         file_name = "srv6"
791
792     elif "vhost" in test_name:
793         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
794             file_name = "vm_vhost_l2"
795             if "114b" in test_name:
796                 feature = ""
797             elif "l2xcbase" in test_name and "x520" in test_name:
798                 feature = "-base-l2xc"
799             elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
800                 feature = "-base-l2bd"
801             else:
802                 feature = "-base"
803         elif "ip4base" in test_name:
804             file_name = "vm_vhost_ip4"
805             feature = "-base"
806
807     elif "ipsec" in test_name:
808         file_name = "ipsec"
809         feature = "-base-scale"
810
811     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
812         file_name = "ip4_tunnels"
813         feature = "-base"
814
815     elif "ip4base" in test_name or "ip4scale" in test_name:
816         file_name = "ip4"
817         if "xl710" in test_name:
818             feature = "-base-scale-features"
819         elif "iacl" in test_name:
820             feature = "-features-iacl"
821         elif "oacl" in test_name:
822             feature = "-features-oacl"
823         elif "snat" in test_name or "cop" in test_name:
824             feature = "-features"
825         else:
826             feature = "-base-scale"
827
828     elif "ip6base" in test_name or "ip6scale" in test_name:
829         file_name = "ip6"
830         feature = "-base-scale"
831
832     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
833             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
834             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
835         file_name = "l2"
836         if "macip" in test_name:
837             feature = "-features-macip"
838         elif "iacl" in test_name:
839             feature = "-features-iacl"
840         elif "oacl" in test_name:
841             feature = "-features-oacl"
842         else:
843             feature = "-base-scale"
844
845     if "x520" in test_name:
846         nic = "x520-"
847     elif "x710" in test_name:
848         nic = "x710-"
849     elif "xl710" in test_name:
850         nic = "xl710-"
851     elif "xxv710" in test_name:
852         nic = "xxv710-"
853     elif "vic1227" in test_name:
854         nic = "vic1227-"
855     elif "vic1385" in test_name:
856         nic = "vic1385-"
857     else:
858         nic = ""
859     anchor += nic
860
861     if "64b" in test_name:
862         framesize = "64b"
863     elif "78b" in test_name:
864         framesize = "78b"
865     elif "imix" in test_name:
866         framesize = "imix"
867     elif "9000b" in test_name:
868         framesize = "9000b"
869     elif "1518b" in test_name:
870         framesize = "1518b"
871     elif "114b" in test_name:
872         framesize = "114b"
873     else:
874         framesize = ""
875     anchor += framesize + '-'
876
877     if "1t1c" in test_name:
878         anchor += "1t1c"
879     elif "2t2c" in test_name:
880         anchor += "2t2c"
881     elif "4t4c" in test_name:
882         anchor += "4t4c"
883     elif "2t1c" in test_name:
884         anchor += "2t1c"
885     elif "4t2c" in test_name:
886         anchor += "4t2c"
887     elif "8t4c" in test_name:
888         anchor += "8t4c"
889
890     return url + file_name + '-' + testbed + '-' + nic + framesize + feature + \
891            anchor + feature
892
893
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the csv dashboard produced earlier and renders it as a colored
    html table embedded in a reST "raw" directive; the first column links
    each test to its trending plot.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Root element of the generated html table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row, first column left-aligned, the rest centered:
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        th = ET.SubElement(
            tr, "th", attrib=dict(align="left" if idx == 0 else "center"))
        th.text = item

    # Data rows; two alternating shades per row classification:
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 hold the regression / progression counters.
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        else:
            color = "normal"
        tr = ET.SubElement(
            dashboard, "tr", attrib=dict(bgcolor=colors[color][r_idx % 2]))

        for c_idx, item in enumerate(row):
            td = ET.SubElement(
                tr, "td",
                attrib=dict(align="left" if c_idx == 0 else "center"))
            if c_idx == 0:
                # First column is the test name linking to its trending plot:
                ref = ET.SubElement(
                    td, "a",
                    attrib=dict(href=_generate_url("../trending/",
                                                   testbed, item)))
                ref.text = item
            else:
                td.text = item

    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
970
971
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each specified build the output lists the build number, the version
    under test and the names of all failed tests, one item per line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        # logging.warn is a deprecated alias; use logging.warning as the
        # rest of this module does.
        logging.warning("    No data for the {0} '{1}'.".
                        format(table.get("type", ""), table.get("title", "")))
        return

    tbl_list = list()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get("version", "")
            except KeyError:
                # Missing metadata for any build makes the whole table
                # unusable, so give up instead of writing a partial file.
                logging.error("Data for {job}: {build} is not present.".
                              format(job=job, build=build))
                return
            tbl_list.append(build)
            tbl_list.append(version)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_data["status"] != "FAIL":
                    continue
                # The NIC name is extracted from the suite (parent) name;
                # tests whose parent does not match REGEX_NIC are skipped.
                groups = re.search(REGEX_NIC, tst_data["parent"])
                if not groups:
                    continue
                nic = groups.group(0)
                tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        for test in tbl_list:
            file_handler.write(test + '\n')
1021
1022
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    For each failing test the table lists the number of failures within the
    configured time window together with the time, VPP build and CSIT build
    of the last failure. The result is written as a csv file and converted
    to a pretty txt table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the last "window" days (default 7) are
    # taken into account.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get("window", 7)))

    # tbl_dict maps test name --> {"name": "<nic>-<test name>",
    #     "data": OrderedDict(build --> (status, generated, version, build))}
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # The NIC name is extracted from the suite (parent) name;
                    # tests whose parent does not match REGEX_NIC are skipped.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    generated = input_data.metadata(job, build).\
                        get("generated", "")
                    if not generated:
                        continue
                    then = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name]["data"][build] = (
                            tst_data["status"],
                            generated,
                            input_data.metadata(job, build).get("version", ""),
                            build)
                except (TypeError, KeyError) as err:
                    logging.warning("tst_name: {} - err: {}".
                                    format(tst_name, repr(err)))

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        # fails_last_* keep the values from the FAIL processed last; builds
        # are stored in insertion order, presumably chronological — so this
        # is taken as the most recent failure (TODO: confirm build order).
        for val in tst_data["data"].values():
            if val[0] == "FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        # Tests without any failure in the window are not listed.
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    # Pre-sort by last failure date (descending), then group by descending
    # number of failures while keeping the date order within each group.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1119
1120
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the csv table of failed tests produced earlier and renders it as
    an html table embedded in a reST "raw" directive; the first column
    links each test to its trending plot.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=',', quotechar='"'))
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Root element of the generated html table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row, first column left-aligned, the rest centered:
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        th = ET.SubElement(
            tr, "th", attrib=dict(align="left" if idx == 0 else "center"))
        th.text = item

    # Data rows with alternating background shades:
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        tr = ET.SubElement(
            failed_tests, "tr", attrib=dict(bgcolor=colors[r_idx % 2]))

        for c_idx, item in enumerate(row):
            td = ET.SubElement(
                tr, "td",
                attrib=dict(align="left" if c_idx == 0 else "center"))
            if c_idx == 0:
                # First column is the test name linking to its trending plot:
                ref = ET.SubElement(
                    td, "a",
                    attrib=dict(href=_generate_url("../trending/",
                                                   testbed, item)))
                ref.text = item
            else:
                td.text = item

    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return

©2016 FD.io a Linux Foundation Collaborative Project. All Rights Reserved.
Linux Foundation is a registered trademark of The Linux Foundation. Linux is a registered trademark of Linus Torvalds.
Please see our privacy policy and terms of use.