1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30 convert_csv_to_pretty_txt, relative_change_stdev
# Matches a NIC token such as "10ge2p1x520" inside a test-suite "parent"
# string; used below to extract (or strip) the NIC identifier from test names.
# NOTE(review): this chunk is truncated — the "import re" / "import logging"
# lines this constant depends on are not visible here; confirm they exist at
# the top of the full file.
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Dispatches each table entry to the generator function named by its
    "algorithm" key (e.g. "table_details" -> table_details(table, data)).

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        # SECURITY NOTE(review): eval() resolves the algorithm name from the
        # specification file; the specification must come from a trusted
        # source. A dict of allowed callables would be safer.
        try:
            eval(table["algorithm"])(table, data)
        except NameError as err:
            # A misspelled/unknown algorithm name must not abort the whole
            # report generation; log it and continue with the next table.
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
# Writes one CSV file per suite with per-test detail columns taken from the
# table model ("columns" entries select fields of each test record).
# NOTE(review): this chunk is truncated — embedded original line numbers jump
# (e.g. 73->75), so statements such as the header/row-list initialisations and
# the try:/except headers around column extraction are missing from this view.
# Code below is kept byte-identical to the extracted text.
# NOTE(review): Python 2 idioms throughout (dict.keys()[0], iteritems(),
# string.replace) — confirm interpreter version before touching.
55 def table_details(table, input_data):
56 """Generate the table(s) with algorithm: table_detailed_test_results
57 specified in the specification file.
59 :param table: Table to generate.
60 :param input_data: Data to process.
61 :type table: pandas.Series
62 :type input_data: InputData
65 logging.info(" Generating the table {0} ...".
66 format(table.get("title", "")))
69 logging.info(" Creating the data set for the {0} '{1}'.".
70 format(table.get("type", ""), table.get("title", "")))
71 data = input_data.filter_data(table)
73 # Prepare the header of the tables
# Column titles are CSV-quoted; embedded double quotes are doubled.
75 for column in table["columns"]:
76 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
78 # Generate the data for the table according to the model in the table
# Only the first job/build pair of the "data" model is used here.
80 job = table["data"].keys()[0]
81 build = str(table["data"][job][0])
83 suites = input_data.suites(job, build)
85 logging.error(" No data available. The table will not be generated.")
88 for suite_longname, suite in suites.iteritems():
90 suite_name = suite["name"]
92 for test in data[job][build].keys():
93 if data[job][build][test]["parent"] in suite_name:
95 for column in table["columns"]:
# "column['data']" looks like "<prefix> <field>"; the field name after the
# first space selects the value from the test record.
97 col_data = str(data[job][build][test][column["data"].
98 split(" ")[1]]).replace('"', '""')
99 if column["data"].split(" ")[1] in ("conf-history",
# "|br|"/"|prein|"/"|preout|" are rST substitution markers used by the
# report build; history/log fields get the pre-formatted treatment.
101 col_data = replace(col_data, " |br| ", "",
103 col_data = " |prein| {0} |preout| ".\
104 format(col_data[:-5])
105 row_lst.append('"{0}"'.format(col_data))
107 row_lst.append("No data")
108 table_lst.append(row_lst)
110 # Write the data to file
# One output CSV per suite: "<output-file>_<suite><ext>".
112 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
113 table["output-file-ext"])
114 logging.info(" Writing file: '{}'".format(file_name))
115 with open(file_name, "w") as file_handler:
116 file_handler.write(",".join(header) + "\n")
117 for item in table_lst:
118 file_handler.write(",".join(item) + "\n")
120 logging.info(" Done.")
# Like table_details, but first merges test data across jobs/builds
# (input_data.merge_data) and writes one CSV per merged suite. Missing values
# are rendered as '"Not captured"' here (vs "No data" in table_details).
# NOTE(review): chunk is truncated — header/row-list initialisations and
# try:/except lines are not visible; code kept byte-identical.
123 def table_merged_details(table, input_data):
124 """Generate the table(s) with algorithm: table_merged_details
125 specified in the specification file.
127 :param table: Table to generate.
128 :param input_data: Data to process.
129 :type table: pandas.Series
130 :type input_data: InputData
133 logging.info(" Generating the table {0} ...".
134 format(table.get("title", "")))
137 logging.info(" Creating the data set for the {0} '{1}'.".
138 format(table.get("type", ""), table.get("title", "")))
139 data = input_data.filter_data(table)
140 data = input_data.merge_data(data)
# Deterministic row order after the merge.
141 data.sort_index(inplace=True)
143 logging.info(" Creating the data set for the {0} '{1}'.".
144 format(table.get("type", ""), table.get("title", "")))
145 suites = input_data.filter_data(table, data_set="suites")
146 suites = input_data.merge_data(suites)
148 # Prepare the header of the tables
150 for column in table["columns"]:
151 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
153 for _, suite in suites.iteritems():
155 suite_name = suite["name"]
157 for test in data.keys():
158 if data[test]["parent"] in suite_name:
160 for column in table["columns"]:
# Field name is the token after the first space in column["data"].
162 col_data = str(data[test][column["data"].
163 split(" ")[1]]).replace('"', '""')
164 col_data = replace(col_data, "No Data",
166 if column["data"].split(" ")[1] in ("conf-history",
# rST pre-formatted block markers for history/log columns.
168 col_data = replace(col_data, " |br| ", "",
170 col_data = " |prein| {0} |preout| ".\
171 format(col_data[:-5])
172 row_lst.append('"{0}"'.format(col_data))
174 row_lst.append('"Not captured"')
175 table_lst.append(row_lst)
177 # Write the data to file
179 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
180 table["output-file-ext"])
181 logging.info(" Writing file: '{}'".format(file_name))
182 with open(file_name, "w") as file_handler:
183 file_handler.write(",".join(header) + "\n")
184 for item in table_lst:
185 file_handler.write(",".join(item) + "\n")
187 logging.info(" Done.")
# Builds a reference-vs-compare performance table: collects per-test samples
# from the "reference" and "compare" build sets (plus optional "history"
# sets), reduces each to mean/stdev in Mpps, appends the relative change [%],
# sorts by it descending, and writes <output-file>.csv plus a pretty .txt.
# NOTE(review): chunk is truncated — try:/except headers, tbl_dict/tbl_lst
# initialisations, inner "for build in builds:" loops and several
# continuation lines are missing from this view; code kept byte-identical.
190 def table_performance_comparison(table, input_data):
191 """Generate the table(s) with algorithm: table_performance_comparison
192 specified in the specification file.
194 :param table: Table to generate.
195 :param input_data: Data to process.
196 :type table: pandas.Series
197 :type input_data: InputData
200 logging.info(" Generating the table {0} ...".
201 format(table.get("title", "")))
204 logging.info(" Creating the data set for the {0} '{1}'.".
205 format(table.get("type", ""), table.get("title", "")))
206 data = input_data.filter_data(table, continue_on_error=True)
208 # Prepare the header of the tables
210 header = ["Test case", ]
212 if table["include-tests"] == "MRR":
213 hdr_param = "Receive Rate"
215 hdr_param = "Throughput"
217 history = table.get("history", None)
# Each optional history set contributes a value + stdev column pair.
221 ["{0} {1} [Mpps]".format(item["title"], hdr_param),
222 "{0} Stdev [Mpps]".format(item["title"])])
224 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
225 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
226 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
227 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
229 header_str = ",".join(header) + "\n"
230 except (AttributeError, KeyError) as err:
231 logging.error("The model is invalid, missing parameter: {0}".
235 # Prepare data to the table:
237 for job, builds in table["reference"]["data"].items():
239 for tst_name, tst_data in data[job][str(build)].iteritems():
# Normalise test names: drop the test-type suffix and collapse the
# thread/core token so the same test matches across build sets.
240 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
241 replace("-ndrpdr", "").replace("-pdrdisc", "").\
242 replace("-ndrdisc", "").replace("-pdr", "").\
243 replace("-ndr", "").\
244 replace("1t1c", "1c").replace("2t1c", "1c").\
245 replace("2t2c", "2c").replace("4t2c", "2c").\
246 replace("4t4c", "4c").replace("8t4c", "4c")
247 if "across topologies" in table["title"].lower():
248 tst_name_mod = tst_name_mod.replace("2n1l-", "")
249 if tbl_dict.get(tst_name_mod, None) is None:
# Display name is prefixed with the NIC extracted from the parent suite.
250 groups = re.search(REGEX_NIC, tst_data["parent"])
251 nic = groups.group(0) if groups else ""
252 name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
254 if "across testbeds" in table["title"].lower() or \
255 "across topologies" in table["title"].lower():
257 replace("1t1c", "1c").replace("2t1c", "1c").\
258 replace("2t2c", "2c").replace("4t2c", "2c").\
259 replace("4t4c", "4c").replace("8t4c", "4c")
260 tbl_dict[tst_name_mod] = {"name": name,
264 # TODO: Re-work when NDRPDRDISC tests are not used
# Pick the reference sample per the configured test kind (MRR/PDR/NDR);
# legacy "PDR"/"NDR" typed results store the value under "value", NDRPDR
# results under ["PDR"|"NDR"]["LOWER"].
265 if table["include-tests"] == "MRR":
266 tbl_dict[tst_name_mod]["ref-data"]. \
267 append(tst_data["result"]["receive-rate"].avg)
268 elif table["include-tests"] == "PDR":
269 if tst_data["type"] == "PDR":
270 tbl_dict[tst_name_mod]["ref-data"]. \
271 append(tst_data["throughput"]["value"])
272 elif tst_data["type"] == "NDRPDR":
273 tbl_dict[tst_name_mod]["ref-data"].append(
274 tst_data["throughput"]["PDR"]["LOWER"])
275 elif table["include-tests"] == "NDR":
276 if tst_data["type"] == "NDR":
277 tbl_dict[tst_name_mod]["ref-data"]. \
278 append(tst_data["throughput"]["value"])
279 elif tst_data["type"] == "NDRPDR":
280 tbl_dict[tst_name_mod]["ref-data"].append(
281 tst_data["throughput"]["NDR"]["LOWER"])
285 pass # No data in output.xml for this test
# Same collection pass for the "compare" build set (appends to cmp-data).
287 for job, builds in table["compare"]["data"].items():
289 for tst_name, tst_data in data[job][str(build)].iteritems():
290 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
291 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
292 replace("-ndrdisc", "").replace("-pdr", ""). \
293 replace("-ndr", "").\
294 replace("1t1c", "1c").replace("2t1c", "1c").\
295 replace("2t2c", "2c").replace("4t2c", "2c").\
296 replace("4t4c", "4c").replace("8t4c", "4c")
297 if "across topologies" in table["title"].lower():
298 tst_name_mod = tst_name_mod.replace("2n1l-", "")
300 # TODO: Re-work when NDRPDRDISC tests are not used
301 if table["include-tests"] == "MRR":
302 tbl_dict[tst_name_mod]["cmp-data"]. \
303 append(tst_data["result"]["receive-rate"].avg)
304 elif table["include-tests"] == "PDR":
305 if tst_data["type"] == "PDR":
306 tbl_dict[tst_name_mod]["cmp-data"]. \
307 append(tst_data["throughput"]["value"])
308 elif tst_data["type"] == "NDRPDR":
309 tbl_dict[tst_name_mod]["cmp-data"].append(
310 tst_data["throughput"]["PDR"]["LOWER"])
311 elif table["include-tests"] == "NDR":
312 if tst_data["type"] == "NDR":
313 tbl_dict[tst_name_mod]["cmp-data"]. \
314 append(tst_data["throughput"]["value"])
315 elif tst_data["type"] == "NDRPDR":
316 tbl_dict[tst_name_mod]["cmp-data"].append(
317 tst_data["throughput"]["NDR"]["LOWER"])
# Tests without compare data are dropped from the table.
323 tbl_dict.pop(tst_name_mod, None)
# Optional history sets: one OrderedDict entry per history title.
326 for job, builds in item["data"].items():
328 for tst_name, tst_data in data[job][str(build)].iteritems():
329 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
330 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
331 replace("-ndrdisc", "").replace("-pdr", ""). \
332 replace("-ndr", "").\
333 replace("1t1c", "1c").replace("2t1c", "1c").\
334 replace("2t2c", "2c").replace("4t2c", "2c").\
335 replace("4t4c", "4c").replace("8t4c", "4c")
336 if "across topologies" in table["title"].lower():
337 tst_name_mod = tst_name_mod.replace("2n1l-", "")
338 if tbl_dict.get(tst_name_mod, None) is None:
340 if tbl_dict[tst_name_mod].get("history", None) is None:
341 tbl_dict[tst_name_mod]["history"] = OrderedDict()
342 if tbl_dict[tst_name_mod]["history"].get(item["title"],
344 tbl_dict[tst_name_mod]["history"][item["title"]] = \
347 # TODO: Re-work when NDRPDRDISC tests are not used
348 if table["include-tests"] == "MRR":
349 tbl_dict[tst_name_mod]["history"][item["title"
350 ]].append(tst_data["result"]["receive-rate"].
352 elif table["include-tests"] == "PDR":
353 if tst_data["type"] == "PDR":
354 tbl_dict[tst_name_mod]["history"][
356 append(tst_data["throughput"]["value"])
357 elif tst_data["type"] == "NDRPDR":
358 tbl_dict[tst_name_mod]["history"][item[
359 "title"]].append(tst_data["throughput"][
361 elif table["include-tests"] == "NDR":
362 if tst_data["type"] == "NDR":
363 tbl_dict[tst_name_mod]["history"][
365 append(tst_data["throughput"]["value"])
366 elif tst_data["type"] == "NDRPDR":
367 tbl_dict[tst_name_mod]["history"][item[
368 "title"]].append(tst_data["throughput"][
372 except (TypeError, KeyError):
# Reduce samples to rows: [name, (hist mean, hist stdev)*, ref mean,
# ref stdev, cmp mean, cmp stdev, delta %]; values scaled to Mpps.
376 for tst_name in tbl_dict.keys():
377 item = [tbl_dict[tst_name]["name"], ]
379 if tbl_dict[tst_name].get("history", None) is not None:
380 for hist_data in tbl_dict[tst_name]["history"].values():
382 item.append(round(mean(hist_data) / 1000000, 2))
383 item.append(round(stdev(hist_data) / 1000000, 2))
385 item.extend([None, None])
387 item.extend([None, None])
388 data_t = tbl_dict[tst_name]["ref-data"]
390 item.append(round(mean(data_t) / 1000000, 2))
391 item.append(round(stdev(data_t) / 1000000, 2))
393 item.extend([None, None])
394 data_t = tbl_dict[tst_name]["cmp-data"]
396 item.append(round(mean(data_t) / 1000000, 2))
397 item.append(round(stdev(data_t) / 1000000, 2))
399 item.extend([None, None])
# item[-4] = ref mean, item[-2] = cmp mean at this point.
400 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
401 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
# Rows with an incomplete column set are silently dropped.
402 if len(item) == len(header):
405 # Sort the table according to the relative change
406 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
408 # Generate csv tables:
409 csv_file = "{0}.csv".format(table["output-file"])
410 with open(csv_file, "w") as file_handler:
411 file_handler.write(header_str)
413 file_handler.write(",".join([str(item) for item in test]) + "\n")
415 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
# NIC-filtered variant of table_performance_comparison: additionally requires
# the configured NIC tag ("nic" key of reference/compare/history models) to be
# present in each test's tags, and strips the NIC token (REGEX_NIC) from the
# normalised test name so results from different NICs line up on one row.
# NOTE(review): chunk is truncated — try:/except headers, tbl_dict/tbl_lst
# initialisations, "for build in builds:" loops and continuation lines are
# missing from this view; code kept byte-identical. See the un-suffixed
# sibling function for the shared collection/reduction logic.
418 def table_performance_comparison_nic(table, input_data):
419 """Generate the table(s) with algorithm: table_performance_comparison
420 specified in the specification file.
422 :param table: Table to generate.
423 :param input_data: Data to process.
424 :type table: pandas.Series
425 :type input_data: InputData
428 logging.info(" Generating the table {0} ...".
429 format(table.get("title", "")))
432 logging.info(" Creating the data set for the {0} '{1}'.".
433 format(table.get("type", ""), table.get("title", "")))
434 data = input_data.filter_data(table, continue_on_error=True)
436 # Prepare the header of the tables
438 header = ["Test case", ]
440 if table["include-tests"] == "MRR":
441 hdr_param = "Receive Rate"
443 hdr_param = "Throughput"
445 history = table.get("history", None)
449 ["{0} {1} [Mpps]".format(item["title"], hdr_param),
450 "{0} Stdev [Mpps]".format(item["title"])])
452 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
453 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
454 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
455 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
457 header_str = ",".join(header) + "\n"
458 except (AttributeError, KeyError) as err:
459 logging.error("The model is invalid, missing parameter: {0}".
463 # Prepare data to the table:
465 for job, builds in table["reference"]["data"].items():
467 for tst_name, tst_data in data[job][str(build)].iteritems():
# Only tests run on the reference NIC are collected.
468 if table["reference"]["nic"] not in tst_data["tags"]:
470 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
471 replace("-ndrpdr", "").replace("-pdrdisc", "").\
472 replace("-ndrdisc", "").replace("-pdr", "").\
473 replace("-ndr", "").\
474 replace("1t1c", "1c").replace("2t1c", "1c").\
475 replace("2t2c", "2c").replace("4t2c", "2c").\
476 replace("4t4c", "4c").replace("8t4c", "4c")
# Drop the NIC token so names are comparable across NICs.
477 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
478 if "across topologies" in table["title"].lower():
479 tst_name_mod = tst_name_mod.replace("2n1l-", "")
480 if tbl_dict.get(tst_name_mod, None) is None:
481 name = "{0}".format("-".join(tst_data["name"].
483 if "across testbeds" in table["title"].lower() or \
484 "across topologies" in table["title"].lower():
486 replace("1t1c", "1c").replace("2t1c", "1c").\
487 replace("2t2c", "2c").replace("4t2c", "2c").\
488 replace("4t4c", "4c").replace("8t4c", "4c")
489 tbl_dict[tst_name_mod] = {"name": name,
493 # TODO: Re-work when NDRPDRDISC tests are not used
494 if table["include-tests"] == "MRR":
495 tbl_dict[tst_name_mod]["ref-data"]. \
496 append(tst_data["result"]["receive-rate"].avg)
497 elif table["include-tests"] == "PDR":
498 if tst_data["type"] == "PDR":
499 tbl_dict[tst_name_mod]["ref-data"]. \
500 append(tst_data["throughput"]["value"])
501 elif tst_data["type"] == "NDRPDR":
502 tbl_dict[tst_name_mod]["ref-data"].append(
503 tst_data["throughput"]["PDR"]["LOWER"])
504 elif table["include-tests"] == "NDR":
505 if tst_data["type"] == "NDR":
506 tbl_dict[tst_name_mod]["ref-data"]. \
507 append(tst_data["throughput"]["value"])
508 elif tst_data["type"] == "NDRPDR":
509 tbl_dict[tst_name_mod]["ref-data"].append(
510 tst_data["throughput"]["NDR"]["LOWER"])
514 pass # No data in output.xml for this test
# Compare pass: same filtering/normalisation, appends to cmp-data.
516 for job, builds in table["compare"]["data"].items():
518 for tst_name, tst_data in data[job][str(build)].iteritems():
519 if table["compare"]["nic"] not in tst_data["tags"]:
521 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
522 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
523 replace("-ndrdisc", "").replace("-pdr", ""). \
524 replace("-ndr", "").\
525 replace("1t1c", "1c").replace("2t1c", "1c").\
526 replace("2t2c", "2c").replace("4t2c", "2c").\
527 replace("4t4c", "4c").replace("8t4c", "4c")
528 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
529 if "across topologies" in table["title"].lower():
530 tst_name_mod = tst_name_mod.replace("2n1l-", "")
532 # TODO: Re-work when NDRPDRDISC tests are not used
533 if table["include-tests"] == "MRR":
534 tbl_dict[tst_name_mod]["cmp-data"]. \
535 append(tst_data["result"]["receive-rate"].avg)
536 elif table["include-tests"] == "PDR":
537 if tst_data["type"] == "PDR":
538 tbl_dict[tst_name_mod]["cmp-data"]. \
539 append(tst_data["throughput"]["value"])
540 elif tst_data["type"] == "NDRPDR":
541 tbl_dict[tst_name_mod]["cmp-data"].append(
542 tst_data["throughput"]["PDR"]["LOWER"])
543 elif table["include-tests"] == "NDR":
544 if tst_data["type"] == "NDR":
545 tbl_dict[tst_name_mod]["cmp-data"]. \
546 append(tst_data["throughput"]["value"])
547 elif tst_data["type"] == "NDRPDR":
548 tbl_dict[tst_name_mod]["cmp-data"].append(
549 tst_data["throughput"]["NDR"]["LOWER"])
555 tbl_dict.pop(tst_name_mod, None)
# Optional history sets, each NIC-filtered by its own "nic" key.
558 for job, builds in item["data"].items():
560 for tst_name, tst_data in data[job][str(build)].iteritems():
561 if item["nic"] not in tst_data["tags"]:
563 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
564 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
565 replace("-ndrdisc", "").replace("-pdr", ""). \
566 replace("-ndr", "").\
567 replace("1t1c", "1c").replace("2t1c", "1c").\
568 replace("2t2c", "2c").replace("4t2c", "2c").\
569 replace("4t4c", "4c").replace("8t4c", "4c")
570 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
571 if "across topologies" in table["title"].lower():
572 tst_name_mod = tst_name_mod.replace("2n1l-", "")
573 if tbl_dict.get(tst_name_mod, None) is None:
575 if tbl_dict[tst_name_mod].get("history", None) is None:
576 tbl_dict[tst_name_mod]["history"] = OrderedDict()
577 if tbl_dict[tst_name_mod]["history"].get(item["title"],
579 tbl_dict[tst_name_mod]["history"][item["title"]] = \
582 # TODO: Re-work when NDRPDRDISC tests are not used
583 if table["include-tests"] == "MRR":
584 tbl_dict[tst_name_mod]["history"][item["title"
585 ]].append(tst_data["result"]["receive-rate"].
587 elif table["include-tests"] == "PDR":
588 if tst_data["type"] == "PDR":
589 tbl_dict[tst_name_mod]["history"][
591 append(tst_data["throughput"]["value"])
592 elif tst_data["type"] == "NDRPDR":
593 tbl_dict[tst_name_mod]["history"][item[
594 "title"]].append(tst_data["throughput"][
596 elif table["include-tests"] == "NDR":
597 if tst_data["type"] == "NDR":
598 tbl_dict[tst_name_mod]["history"][
600 append(tst_data["throughput"]["value"])
601 elif tst_data["type"] == "NDRPDR":
602 tbl_dict[tst_name_mod]["history"][item[
603 "title"]].append(tst_data["throughput"][
607 except (TypeError, KeyError):
# Reduction to rows and output, identical to the sibling function:
# means/stdevs in Mpps, relative change, sort desc, csv + pretty txt.
611 for tst_name in tbl_dict.keys():
612 item = [tbl_dict[tst_name]["name"], ]
614 if tbl_dict[tst_name].get("history", None) is not None:
615 for hist_data in tbl_dict[tst_name]["history"].values():
617 item.append(round(mean(hist_data) / 1000000, 2))
618 item.append(round(stdev(hist_data) / 1000000, 2))
620 item.extend([None, None])
622 item.extend([None, None])
623 data_t = tbl_dict[tst_name]["ref-data"]
625 item.append(round(mean(data_t) / 1000000, 2))
626 item.append(round(stdev(data_t) / 1000000, 2))
628 item.extend([None, None])
629 data_t = tbl_dict[tst_name]["cmp-data"]
631 item.append(round(mean(data_t) / 1000000, 2))
632 item.append(round(stdev(data_t) / 1000000, 2))
634 item.extend([None, None])
635 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
636 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
637 if len(item) == len(header):
640 # Sort the table according to the relative change
641 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
643 # Generate csv tables:
644 csv_file = "{0}.csv".format(table["output-file"])
645 with open(csv_file, "w") as file_handler:
646 file_handler.write(header_str)
648 file_handler.write(",".join([str(item) for item in test]) + "\n")
650 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
# Compares the same tests run on two different NICs within one data set: each
# result is routed to ref-data or cmp-data depending on which NIC tag
# (reference/compare "nic" keys) the test carries; output is mean/stdev in
# Mpps plus relative change, sorted descending, as csv + pretty txt.
# NOTE(review): chunk is truncated — try:/except headers, tbl_dict/tbl_lst
# initialisations and "for build in builds:" loops are missing from this
# view; code kept byte-identical.
653 def table_nics_comparison(table, input_data):
654 """Generate the table(s) with algorithm: table_nics_comparison
655 specified in the specification file.
657 :param table: Table to generate.
658 :param input_data: Data to process.
659 :type table: pandas.Series
660 :type input_data: InputData
663 logging.info(" Generating the table {0} ...".
664 format(table.get("title", "")))
667 logging.info(" Creating the data set for the {0} '{1}'.".
668 format(table.get("type", ""), table.get("title", "")))
669 data = input_data.filter_data(table, continue_on_error=True)
671 # Prepare the header of the tables
673 header = ["Test case", ]
675 if table["include-tests"] == "MRR":
676 hdr_param = "Receive Rate"
678 hdr_param = "Throughput"
681 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
682 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
683 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
684 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
686 header_str = ",".join(header) + "\n"
687 except (AttributeError, KeyError) as err:
688 logging.error("The model is invalid, missing parameter: {0}".
692 # Prepare data to the table:
694 for job, builds in table["data"].items():
696 for tst_name, tst_data in data[job][str(build)].iteritems():
# Normalise the test name and strip the NIC token so both NICs map to
# the same table row.
697 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
698 replace("-ndrpdr", "").replace("-pdrdisc", "").\
699 replace("-ndrdisc", "").replace("-pdr", "").\
700 replace("-ndr", "").\
701 replace("1t1c", "1c").replace("2t1c", "1c").\
702 replace("2t2c", "2c").replace("4t2c", "2c").\
703 replace("4t4c", "4c").replace("8t4c", "4c")
704 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
705 if tbl_dict.get(tst_name_mod, None) is None:
706 name = "-".join(tst_data["name"].split("-")[:-1])
707 tbl_dict[tst_name_mod] = {"name": name,
# Unlike the comparison functions above, NDRPDR results are assumed
# here for PDR/NDR (no legacy "value" branch).
711 if table["include-tests"] == "MRR":
712 result = tst_data["result"]["receive-rate"].avg
713 elif table["include-tests"] == "PDR":
714 result = tst_data["throughput"]["PDR"]["LOWER"]
715 elif table["include-tests"] == "NDR":
716 result = tst_data["throughput"]["NDR"]["LOWER"]
# Route the sample by NIC tag membership.
721 if table["reference"]["nic"] in tst_data["tags"]:
722 tbl_dict[tst_name_mod]["ref-data"].append(result)
723 elif table["compare"]["nic"] in tst_data["tags"]:
724 tbl_dict[tst_name_mod]["cmp-data"].append(result)
725 except (TypeError, KeyError) as err:
726 logging.debug("No data for {0}".format(tst_name))
727 logging.debug(repr(err))
728 # No data in output.xml for this test
731 for tst_name in tbl_dict.keys():
732 item = [tbl_dict[tst_name]["name"], ]
733 data_t = tbl_dict[tst_name]["ref-data"]
735 item.append(round(mean(data_t) / 1000000, 2))
736 item.append(round(stdev(data_t) / 1000000, 2))
738 item.extend([None, None])
739 data_t = tbl_dict[tst_name]["cmp-data"]
741 item.append(round(mean(data_t) / 1000000, 2))
742 item.append(round(stdev(data_t) / 1000000, 2))
744 item.extend([None, None])
# item[-4]/item[-2] are the ref/cmp means; delta only when both exist.
745 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
746 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
747 if len(item) == len(header):
750 # Sort the table according to the relative change
751 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
753 # Generate csv tables:
754 csv_file = "{0}.csv".format(table["output-file"])
755 with open(csv_file, "w") as file_handler:
756 file_handler.write(header_str)
758 file_handler.write(",".join([str(item) for item in test]) + "\n")
760 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
# Compares SOAK test critical rates ("compare" set) against the matching
# NDR/PDR/MRR results ("reference" set). Unlike the other comparison tables,
# the delta column uses relative_change_stdev, which also yields a stdev of
# the delta; rows are sorted by the last column descending.
# NOTE(review): chunk is truncated — try:/except headers, header/tbl_dict
# initialisations and "for build in builds:" loops are missing from this
# view; code kept byte-identical.
763 def table_soak_vs_ndr(table, input_data):
764 """Generate the table(s) with algorithm: table_soak_vs_ndr
765 specified in the specification file.
767 :param table: Table to generate.
768 :param input_data: Data to process.
769 :type table: pandas.Series
770 :type input_data: InputData
773 logging.info(" Generating the table {0} ...".
774 format(table.get("title", "")))
777 logging.info(" Creating the data set for the {0} '{1}'.".
778 format(table.get("type", ""), table.get("title", "")))
779 data = input_data.filter_data(table, continue_on_error=True)
781 # Prepare the header of the table
785 "{0} Throughput [Mpps]".format(table["reference"]["title"]),
786 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
787 "{0} Throughput [Mpps]".format(table["compare"]["title"]),
788 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
789 "Delta [%]", "Stdev of delta [%]"]
790 header_str = ",".join(header) + "\n"
791 except (AttributeError, KeyError) as err:
792 logging.error("The model is invalid, missing parameter: {0}".
796 # Create a list of available SOAK test results:
798 for job, builds in table["compare"]["data"].items():
800 for tst_name, tst_data in data[job][str(build)].iteritems():
801 if tst_data["type"] == "SOAK":
# Strip the "-soak" suffix so names match their NDR counterparts.
802 tst_name_mod = tst_name.replace("-soak", "")
803 if tbl_dict.get(tst_name_mod, None) is None:
804 groups = re.search(REGEX_NIC, tst_data["parent"])
805 nic = groups.group(0) if groups else ""
806 name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
808 tbl_dict[tst_name_mod] = {
# SOAK results expose the critical rate directly under
# throughput["LOWER"] (no PDR/NDR nesting).
814 tbl_dict[tst_name_mod]["cmp-data"].append(
815 tst_data["throughput"]["LOWER"])
816 except (KeyError, TypeError):
818 tests_lst = tbl_dict.keys()
820 # Add corresponding NDR test results:
# Only reference tests whose normalised name matches a collected SOAK
# test are used.
821 for job, builds in table["reference"]["data"].items():
823 for tst_name, tst_data in data[job][str(build)].iteritems():
824 tst_name_mod = tst_name.replace("-ndrpdr", "").\
826 if tst_name_mod in tests_lst:
828 if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
829 if table["include-tests"] == "MRR":
830 result = tst_data["result"]["receive-rate"].avg
831 elif table["include-tests"] == "PDR":
832 result = tst_data["throughput"]["PDR"]["LOWER"]
833 elif table["include-tests"] == "NDR":
834 result = tst_data["throughput"]["NDR"]["LOWER"]
837 if result is not None:
838 tbl_dict[tst_name_mod]["ref-data"].append(
840 except (KeyError, TypeError):
# Reduce to rows: [name, ref mean, ref stdev, cmp mean, cmp stdev,
# delta %, stdev of delta %]; throughputs scaled to Mpps.
844 for tst_name in tbl_dict.keys():
845 item = [tbl_dict[tst_name]["name"], ]
846 data_r = tbl_dict[tst_name]["ref-data"]
848 data_r_mean = mean(data_r)
849 item.append(round(data_r_mean / 1000000, 2))
850 data_r_stdev = stdev(data_r)
851 item.append(round(data_r_stdev / 1000000, 2))
855 item.extend([None, None])
856 data_c = tbl_dict[tst_name]["cmp-data"]
858 data_c_mean = mean(data_c)
859 item.append(round(data_c_mean / 1000000, 2))
860 data_c_stdev = stdev(data_c)
861 item.append(round(data_c_stdev / 1000000, 2))
865 item.extend([None, None])
866 if data_r_mean and data_c_mean:
867 delta, d_stdev = relative_change_stdev(
868 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
869 item.append(round(delta, 2))
870 item.append(round(d_stdev, 2))
873 # Sort the table according to the relative change
874 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
876 # Generate csv tables:
877 csv_file = "{0}.csv".format(table["output-file"])
878 with open(csv_file, "w") as file_handler:
879 file_handler.write(header_str)
881 file_handler.write(",".join([str(item) for item in test]) + "\n")
883 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
# Builds the MRR trending dashboard: collects receive-rate samples per test
# across builds, classifies anomalies (classify_anomalies), computes short-
# and long-term relative change of the trend, and orders rows primarily by
# number of regressions, then progressions, then short-term change.
# NOTE(review): chunk is truncated — try:/except headers, tbl_dict/tbl_lst/
# tbl_sorted initialisations, the "for build in builds:" loop and the
# last_avg/max_long_avg assignments are missing from this view; code kept
# byte-identical.
886 def table_performance_trending_dashboard(table, input_data):
887 """Generate the table(s) with algorithm:
888 table_performance_trending_dashboard
889 specified in the specification file.
891 :param table: Table to generate.
892 :param input_data: Data to process.
893 :type table: pandas.Series
894 :type input_data: InputData
897 logging.info(" Generating the table {0} ...".
898 format(table.get("title", "")))
901 logging.info(" Creating the data set for the {0} '{1}'.".
902 format(table.get("type", ""), table.get("title", "")))
903 data = input_data.filter_data(table, continue_on_error=True)
905 # Prepare the header of the tables
906 header = ["Test Case",
908 "Short-Term Change [%]",
909 "Long-Term Change [%]",
913 header_str = ",".join(header) + "\n"
915 # Prepare data to the table:
917 for job, builds in table["data"].items():
919 for tst_name, tst_data in data[job][str(build)].iteritems():
# Tests on the specification's ignore-list are skipped.
920 if tst_name.lower() in table.get("ignore-list", list()):
922 if tbl_dict.get(tst_name, None) is None:
923 groups = re.search(REGEX_NIC, tst_data["parent"])
# NOTE(review): no "if groups else" guard here (unlike the other
# functions) — groups.group(0) would raise on a non-matching parent;
# presumably a missing (truncated) continue handles that case.
926 nic = groups.group(0)
927 tbl_dict[tst_name] = {
928 "name": "{0}-{1}".format(nic, tst_data["name"]),
929 "data": OrderedDict()}
931 tbl_dict[tst_name]["data"][str(build)] = \
932 tst_data["result"]["receive-rate"]
933 except (TypeError, KeyError):
934 pass # No data in output.xml for this test
937 for tst_name in tbl_dict.keys():
938 data_t = tbl_dict[tst_name]["data"]
# classify_anomalies returns a per-sample classification list
# ("regression"/"progression"/normal) and the trend averages.
942 classification_lst, avgs = classify_anomalies(data_t)
944 win_size = min(len(data_t), table["window"])
945 long_win_size = min(len(data_t), table["long-trend-window"])
949 [x for x in avgs[-long_win_size:-win_size]
954 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Short-term change: last trend average vs. the one a window ago.
956 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
957 rel_change_last = nan
959 rel_change_last = round(
960 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
# Long-term change: last trend average vs. the long-window maximum.
962 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
963 rel_change_long = nan
965 rel_change_long = round(
966 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
968 if classification_lst:
969 if isnan(rel_change_last) and isnan(rel_change_long):
971 if (isnan(last_avg) or
972 isnan(rel_change_last) or
973 isnan(rel_change_long)):
# Row layout: [name, last avg Mpps, short-term %, long-term %,
# #regressions, #progressions] within the short-term window.
976 [tbl_dict[tst_name]["name"],
977 round(last_avg / 1000000, 2),
980 classification_lst[-win_size:].count("regression"),
981 classification_lst[-win_size:].count("progression")])
983 tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-key ordering: most regressions first, then most
# progressions, then short-term change (item[2]).
986 for nrr in range(table["window"], -1, -1):
987 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
988 for nrp in range(table["window"], -1, -1):
989 tbl_out = [item for item in tbl_reg if item[5] == nrp]
990 tbl_out.sort(key=lambda rel: rel[2])
991 tbl_sorted.extend(tbl_out)
993 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
995 logging.info(" Writing file: '{0}'".format(file_name))
996 with open(file_name, "w") as file_handler:
997 file_handler.write(header_str)
998 for test in tbl_sorted:
999 file_handler.write(",".join([str(item) for item in test]) + '\n')
1001 txt_file_name = "{0}.txt".format(table["output-file"])
1002 logging.info(" Writing file: '{0}'".format(txt_file_name))
1003 convert_csv_to_pretty_txt(file_name, txt_file_name)
# NOTE(review): this listing carries embedded original line numbers and the
# numbering jumps (e.g. 1015 -> 1025), so several statements are elided from
# this view (the initial url/file_name/feature/anchor assignments and a
# number of branch bodies). Comments below describe only what is visible.
1006 def _generate_url(base, testbed, test_name):
1007 """Generate URL to a trending plot from the name of the test case.
1009 :param base: The base part of URL common to all test cases.
1010 :param testbed: The testbed used for testing.
1011 :param test_name: The name of the test case.
1014 :type test_name: str
1015 :returns: The URL to the plot with the trending data for the given test
# Map substrings of the test name onto the trending-plot page (file_name)
# and the feature suffix used in the URL anchor.
1025 if "lbdpdk" in test_name or "lbvpp" in test_name:
1026 file_name = "link_bonding"
1028 elif "114b" in test_name and "vhost" in test_name:
1031 elif "testpmd" in test_name or "l3fwd" in test_name:
1034 elif "memif" in test_name:
1035 file_name = "container_memif"
1038 elif "srv6" in test_name:
1041 elif "vhost" in test_name:
1042 if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
1043 file_name = "vm_vhost_l2"
1044 if "114b" in test_name:
1046 elif "l2xcbase" in test_name and "x520" in test_name:
1047 feature = "-base-l2xc"
1048 elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
1049 feature = "-base-l2bd"
1052 elif "ip4base" in test_name:
1053 file_name = "vm_vhost_ip4"
# IPsec tests: the more specific "ipsecbasetnlsw" match must precede the
# generic "ipsec" match below.
1056 elif "ipsecbasetnlsw" in test_name:
1057 file_name = "ipsecsw"
1058 feature = "-base-scale"
1060 elif "ipsec" in test_name:
1062 feature = "-base-scale"
1063 if "hw-" in test_name:
1064 file_name = "ipsechw"
1065 elif "sw-" in test_name:
1066 file_name = "ipsecsw"
1067 if "-int-" in test_name:
1068 feature = "-base-scale-int"
1069 elif "tnl" in test_name:
1070 feature = "-base-scale-tnl"
1072 elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
1073 file_name = "ip4_tunnels"
1076 elif "ip4base" in test_name or "ip4scale" in test_name:
1078 if "xl710" in test_name:
1079 feature = "-base-scale-features"
1080 elif "iacl" in test_name:
1081 feature = "-features-iacl"
1082 elif "oacl" in test_name:
1083 feature = "-features-oacl"
1084 elif "snat" in test_name or "cop" in test_name:
1085 feature = "-features"
1087 feature = "-base-scale"
1089 elif "ip6base" in test_name or "ip6scale" in test_name:
1091 feature = "-base-scale"
# NOTE(review): "l2dbbasemaclrn"/"l2dbscale" look like typos of
# "l2bd..." already matched above -- confirm against real test names.
1093 elif "l2xcbase" in test_name or "l2xcscale" in test_name \
1094 or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
1095 or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
1097 if "macip" in test_name:
1098 feature = "-features-macip"
1099 elif "iacl" in test_name:
1100 feature = "-features-iacl"
1101 elif "oacl" in test_name:
1102 feature = "-features-oacl"
1104 feature = "-base-scale"
# NIC model component (branch bodies elided in this view; presumably each
# sets `nic` -- TODO confirm).
1106 if "x520" in test_name:
1108 elif "x710" in test_name:
1110 elif "xl710" in test_name:
1112 elif "xxv710" in test_name:
1114 elif "vic1227" in test_name:
1116 elif "vic1385" in test_name:
1118 elif "x553" in test_name:
# Frame-size component of the anchor (branch bodies elided; presumably each
# sets `framesize` -- TODO confirm).
1124 if "64b" in test_name:
1126 elif "78b" in test_name:
1128 elif "imix" in test_name:
1130 elif "9000b" in test_name:
1132 elif "1518b" in test_name:
1134 elif "114b" in test_name:
1138 anchor += framesize + '-'
# Thread/core combination component (branch bodies elided in this view).
1140 if "1t1c" in test_name:
1142 elif "2t2c" in test_name:
1144 elif "4t4c" in test_name:
1146 elif "2t1c" in test_name:
1148 elif "4t2c" in test_name:
1150 elif "8t4c" in test_name:
# Assemble the final URL; "-int"/"-tnl" are stripped from the path part of
# the feature but kept in the trailing anchor fragment.
1153 return url + file_name + '-' + testbed + '-' + nic + framesize + \
1154 feature.replace("-int", "").replace("-tnl", "") + anchor + feature
# NOTE(review): embedded original line numbers jump (e.g. 1159 -> 1162), so
# this view is missing lines (the `try:` header, `return` statements, the
# regression/progression tests at 1204-1209, cell-text assignments, etc.).
# Code below is kept byte-identical; comments describe only what is visible.
1157 def table_performance_trending_dashboard_html(table, input_data):
1158 """Generate the table(s) with algorithm:
1159 table_performance_trending_dashboard_html specified in the specification
1162 :param table: Table to generate.
1163 :param input_data: Data to process.
1165 :type input_data: InputData
# The testbed name is required to build per-test trending URLs below.
1168 testbed = table.get("testbed", None)
1170 logging.error("The testbed is not defined for the table '{0}'.".
1171 format(table.get("title", "")))
1174 logging.info(" Generating the table {0} ...".
1175 format(table.get("title", "")))
# Read the previously generated dashboard CSV ('rb' mode: Python 2 csv).
1178 with open(table["input-file"], 'rb') as csv_file:
1179 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1180 csv_lst = [item for item in csv_content]
1182 logging.warning("The input file is not defined.")
1184 except csv.Error as err:
1185 logging.warning("Not possible to process the file '{0}'.\n{1}".
1186 format(table["input-file"], err))
# Build the HTML table as an ElementTree; root <table> element.
1190 dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row from the first CSV line; first column left-aligned.
1193 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
1194 for idx, item in enumerate(csv_lst[0]):
1195 alignment = "left" if idx == 0 else "center"
1196 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Row colors: a (light, dark) pair per classification, alternated by row
# parity for a striped table.
1200 colors = {"regression": ("#ffcccc", "#ff9999"),
1201 "progression": ("#c6ecc6", "#9fdf9f"),
1202 "normal": ("#e9f1fb", "#d4e4f7")}
1203 for r_idx, row in enumerate(csv_lst[1:]):
# NOTE(review): the conditions choosing between these classifications
# (lines 1204, 1206) are elided from this view.
1205 color = "regression"
1207 color = "progression"
1210 background = colors[color][r_idx % 2]
1211 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
# Data cells; the first column becomes a link to the trending plot.
1214 for c_idx, item in enumerate(row):
1215 alignment = "left" if c_idx == 0 else "center"
1216 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
1219 url = _generate_url("../trending/", testbed, item)
1220 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Emit the table wrapped in an rST ".. raw:: html" directive.
1225 with open(table["output-file"], 'w') as html_file:
1226 logging.info(" Writing file: '{0}'".format(table["output-file"]))
1227 html_file.write(".. raw:: html\n\n\t")
1228 html_file.write(ET.tostring(dashboard))
1229 html_file.write("\n\t<p><br><br></p>\n")
1231 logging.warning("The output file is not defined.")
# NOTE(review): embedded original line numbers jump (e.g. 1242 -> 1245), so
# some lines (`return`/`continue` statements, a `try:` header around the
# metadata lookup, the `tbl_list` initialization) are elided from this view.
1235 def table_last_failed_tests(table, input_data):
1236 """Generate the table(s) with algorithm: table_last_failed_tests
1237 specified in the specification file.
1239 :param table: Table to generate.
1240 :param input_data: Data to process.
1241 :type table: pandas.Series
1242 :type input_data: InputData
1245 logging.info(" Generating the table {0} ...".
1246 format(table.get("title", "")))
1248 # Transform the data
1249 logging.info(" Creating the data set for the {0} '{1}'.".
1250 format(table.get("type", ""), table.get("title", "")))
# continue_on_error: keep whatever data filtered successfully.
1251 data = input_data.filter_data(table, continue_on_error=True)
1253 if data is None or data.empty:
1254 logging.warn(" No data for the {0} '{1}'.".
1255 format(table.get("type", ""), table.get("title", "")))
# One pass per (job, build) listed in the table specification.
1259 for job, builds in table["data"].items():
1260 for build in builds:
1263 version = input_data.metadata(job, build).get("version", "")
1265 logging.error("Data for {job}: {build} is not present.".
1266 format(job=job, build=build))
# Build id and version head the list, followed by one line per
# failed test.
1268 tbl_list.append(build)
1269 tbl_list.append(version)
1270 for tst_name, tst_data in data[job][build].iteritems():
1271 if tst_data["status"] != "FAIL":
# Extract the NIC model from the parent suite name to prefix
# the test name.
1273 groups = re.search(REGEX_NIC, tst_data["parent"])
1276 nic = groups.group(0)
1277 tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))
# Write one entry per line to the output file.
1279 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1280 logging.info(" Writing file: '{0}'".format(file_name))
1281 with open(file_name, "w") as file_handler:
1282 for test in tbl_list:
1283 file_handler.write(test + '\n')
# NOTE(review): embedded original line numbers jump (e.g. 1302 -> 1304), so
# some lines are elided from this view (initializations of `now`, `tbl_dict`,
# `tbl_lst`, `max_fails`, `fails_nr`, `tbl_sorted`; `continue` statements;
# parts of the appended tuples). Code is kept byte-identical.
1286 def table_failed_tests(table, input_data):
1287 """Generate the table(s) with algorithm: table_failed_tests
1288 specified in the specification file.
1290 :param table: Table to generate.
1291 :param input_data: Data to process.
1292 :type table: pandas.Series
1293 :type input_data: InputData
1296 logging.info(" Generating the table {0} ...".
1297 format(table.get("title", "")))
1299 # Transform the data
1300 logging.info(" Creating the data set for the {0} '{1}'.".
1301 format(table.get("type", ""), table.get("title", "")))
1302 data = input_data.filter_data(table, continue_on_error=True)
1304 # Prepare the header of the tables
1305 header = ["Test Case",
1307 "Last Failure [Time]",
1308 "Last Failure [VPP-Build-Id]",
1309 "Last Failure [CSIT-Job-Build-Id]"]
1311 # Generate the data for the table according to the model in the table
# Only builds generated within the last `window` days (default 7) count.
1315 timeperiod = timedelta(int(table.get("window", 7)))
1318 for job, builds in table["data"].items():
1319 for build in builds:
1321 for tst_name, tst_data in data[job][build].iteritems():
# Skip tests explicitly ignored by the specification.
1322 if tst_name.lower() in table.get("ignore-list", list()):
1324 if tbl_dict.get(tst_name, None) is None:
# First sighting: derive the NIC prefix from the parent
# suite and create the per-test record.
1325 groups = re.search(REGEX_NIC, tst_data["parent"])
1328 nic = groups.group(0)
1329 tbl_dict[tst_name] = {
1330 "name": "{0}-{1}".format(nic, tst_data["name"]),
1331 "data": OrderedDict()}
1333 generated = input_data.metadata(job, build).\
1334 get("generated", "")
# Keep only builds inside the time window.
1337 then = dt.strptime(generated, "%Y%m%d %H:%M")
1338 if (now - then) <= timeperiod:
1339 tbl_dict[tst_name]["data"][build] = (
1342 input_data.metadata(job, build).get("version", ""),
1344 except (TypeError, KeyError) as err:
1345 logging.warning("tst_name: {} - err: {}".
1346 format(tst_name, repr(err)))
# Count failures per test and remember the most recent failure's
# date / VPP build / CSIT build.
1350 for tst_data in tbl_dict.values():
1352 for val in tst_data["data"].values():
1353 if val[0] == "FAIL":
1355 fails_last_date = val[1]
1356 fails_last_vpp = val[2]
1357 fails_last_csit = val[3]
1359 max_fails = fails_nr if fails_nr > max_fails else max_fails
1360 tbl_lst.append([tst_data["name"],
1364 "mrr-daily-build-{0}".format(fails_last_csit)])
# Sort by last-failure column, then bucket by descending failure count.
1366 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1368 for nrf in range(max_fails, -1, -1):
1369 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1370 tbl_sorted.extend(tbl_fails)
# Write the CSV, then a pretty-printed text twin.
1371 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1373 logging.info(" Writing file: '{0}'".format(file_name))
1374 with open(file_name, "w") as file_handler:
1375 file_handler.write(",".join(header) + "\n")
1376 for test in tbl_sorted:
1377 file_handler.write(",".join([str(item) for item in test]) + '\n')
1379 txt_file_name = "{0}.txt".format(table["output-file"])
1380 logging.info(" Writing file: '{0}'".format(txt_file_name))
1381 convert_csv_to_pretty_txt(file_name, txt_file_name)
# NOTE(review): embedded original line numbers jump (e.g. 1391 -> 1394), so
# some lines (`return` statements, the `try:` header, cell-text assignments)
# are elided from this view. Structure mirrors
# table_performance_trending_dashboard_html but with a single color pair.
1384 def table_failed_tests_html(table, input_data):
1385 """Generate the table(s) with algorithm: table_failed_tests_html
1386 specified in the specification file.
1388 :param table: Table to generate.
1389 :param input_data: Data to process.
1390 :type table: pandas.Series
1391 :type input_data: InputData
# Testbed is required to build per-test trending URLs below.
1394 testbed = table.get("testbed", None)
1396 logging.error("The testbed is not defined for the table '{0}'.".
1397 format(table.get("title", "")))
1400 logging.info(" Generating the table {0} ...".
1401 format(table.get("title", "")))
# Read the failed-tests CSV produced by table_failed_tests.
1404 with open(table["input-file"], 'rb') as csv_file:
1405 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1406 csv_lst = [item for item in csv_content]
1408 logging.warning("The input file is not defined.")
1410 except csv.Error as err:
1411 logging.warning("Not possible to process the file '{0}'.\n{1}".
1412 format(table["input-file"], err))
# Build the HTML table as an ElementTree; root <table> element.
1416 failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row from the first CSV line; first column left-aligned.
1419 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
1420 for idx, item in enumerate(csv_lst[0]):
1421 alignment = "left" if idx == 0 else "center"
1422 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Alternate two background shades for a striped table.
1426 colors = ("#e9f1fb", "#d4e4f7")
1427 for r_idx, row in enumerate(csv_lst[1:]):
1428 background = colors[r_idx % 2]
1429 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
# Data cells; the first column becomes a link to the trending plot.
1432 for c_idx, item in enumerate(row):
1433 alignment = "left" if c_idx == 0 else "center"
1434 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
1437 url = _generate_url("../trending/", testbed, item)
1438 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Emit the table wrapped in an rST ".. raw:: html" directive.
1443 with open(table["output-file"], 'w') as html_file:
1444 logging.info(" Writing file: '{0}'".format(table["output-file"]))
1445 html_file.write(".. raw:: html\n\n\t")
1446 html_file.write(ET.tostring(failed_tests))
1447 html_file.write("\n\t<p><br><br></p>\n")
1449 logging.warning("The output file is not defined.")