1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30 convert_csv_to_pretty_txt, relative_change_stdev
# Pattern matching a NIC token embedded in a test/suite name,
# e.g. "10ge2p1x520" or "40ge2p1xl710": <speed>ge<ports>p<port><model>.
# Used both to extract the NIC (re.search) and to strip it (re.sub).
REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
# NOTE(review): this block is preserved byte-for-byte from a sampled,
# whitespace-mangled listing — the original file's line numbers are fused
# into each line and lines are missing wherever the embedded numbering
# jumps (e.g. 42-44, 47).  Restore from the original source before running.
36 def generate_tables(spec, data):
37 """Generate all tables specified in the specification file.
39 :param spec: Specification read from the specification file.
40 :param data: Data to process.
41 :type spec: Specification
# NOTE(review): the docstring's ":type data:" line and closing quotes
# (original lines 42-44) are missing from this listing.
45 logging.info("Generating the tables ...")
46 for table in spec.tables:
# Dispatches by name: eval() looks up the module-level function named in
# the spec ("algorithm") and calls it.  The `try:` that pairs with the
# `except NameError` below (original line 47) is missing from the listing.
48 eval(table["algorithm"])(table, data)
49 except NameError as err:
50 logging.error("Probably algorithm '{alg}' is not defined: {err}".
51 format(alg=table["algorithm"], err=repr(err)))
# NOTE(review): preserved byte-for-byte from a sampled, whitespace-mangled
# listing — original line numbers are fused into the code and lines are
# missing wherever the embedded numbering jumps.  Restore from the original
# file before editing logic; comments below only annotate what is visible.
55 def table_details(table, input_data):
56 """Generate the table(s) with algorithm: table_detailed_test_results
57 specified in the specification file.
59 :param table: Table to generate.
60 :param input_data: Data to process.
61 :type table: pandas.Series
62 :type input_data: InputData
65 logging.info(" Generating the table {0} ...".
66 format(table.get("title", "")))
69 logging.info(" Creating the data set for the {0} '{1}'.".
70 format(table.get("type", ""), table.get("title", "")))
71 data = input_data.filter_data(table)
73 # Prepare the header of the tables
75 for column in table["columns"]:
# CSV quoting: wrap in quotes and double any embedded quote characters.
76 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
78 # Generate the data for the table according to the model in the table
# NOTE(review): Python 2 idioms — .keys()[0] below and .iteritems() later
# do not work on Python 3; also `replace` here is the function imported
# from the `string` module (Python 2 only).
80 job = table["data"].keys()[0]
81 build = str(table["data"][job][0])
83 suites = input_data.suites(job, build)
85 logging.error(" No data available. The table will not be generated.")
88 for suite_longname, suite in suites.iteritems():
90 suite_name = suite["name"]
92 for test in data[job][build].keys():
93 if data[job][build][test]["parent"] in suite_name:
95 for column in table["columns"]:
# column["data"] looks like "data <key>"; the second token selects the
# field of the test-data dict.
97 col_data = str(data[job][build][test][column["data"].
98 split(" ")[1]]).replace('"', '""')
99 if column["data"].split(" ")[1] in ("conf-history",
101 col_data = replace(col_data, " |br| ", "",
103 col_data = " |prein| {0} |preout| ".\
104 format(col_data[:-5])
105 row_lst.append('"{0}"'.format(col_data))
107 row_lst.append("No data")
108 table_lst.append(row_lst)
110 # Write the data to file
# One CSV file per suite: <output-file>_<suite><ext>.
112 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
113 table["output-file-ext"])
114 logging.info(" Writing file: '{}'".format(file_name))
115 with open(file_name, "w") as file_handler:
116 file_handler.write(",".join(header) + "\n")
117 for item in table_lst:
118 file_handler.write(",".join(item) + "\n")
120 logging.info(" Done.")
# NOTE(review): preserved byte-for-byte from a sampled, whitespace-mangled
# listing — original line numbers are fused into the code and lines are
# missing wherever the embedded numbering jumps.  Same shape as
# table_details above, but operates on data merged across jobs/builds.
123 def table_merged_details(table, input_data):
124 """Generate the table(s) with algorithm: table_merged_details
125 specified in the specification file.
127 :param table: Table to generate.
128 :param input_data: Data to process.
129 :type table: pandas.Series
130 :type input_data: InputData
133 logging.info(" Generating the table {0} ...".
134 format(table.get("title", "")))
137 logging.info(" Creating the data set for the {0} '{1}'.".
138 format(table.get("type", ""), table.get("title", "")))
139 data = input_data.filter_data(table)
140 data = input_data.merge_data(data)
141 data.sort_index(inplace=True)
143 logging.info(" Creating the data set for the {0} '{1}'.".
144 format(table.get("type", ""), table.get("title", "")))
145 suites = input_data.filter_data(table, data_set="suites")
146 suites = input_data.merge_data(suites)
148 # Prepare the header of the tables
150 for column in table["columns"]:
151 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
# NOTE(review): .iteritems() / `replace` from the `string` module below are
# Python 2 only.
153 for _, suite in suites.iteritems():
155 suite_name = suite["name"]
157 for test in data.keys():
158 if data[test]["parent"] in suite_name:
160 for column in table["columns"]:
162 col_data = str(data[test][column["data"].
163 split(" ")[1]]).replace('"', '""')
164 col_data = replace(col_data, "No Data",
166 if column["data"].split(" ")[1] in ("conf-history",
168 col_data = replace(col_data, " |br| ", "",
170 col_data = " |prein| {0} |preout| ".\
171 format(col_data[:-5])
172 row_lst.append('"{0}"'.format(col_data))
174 row_lst.append('"Not captured"')
175 table_lst.append(row_lst)
177 # Write the data to file
179 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
180 table["output-file-ext"])
181 logging.info(" Writing file: '{}'".format(file_name))
182 with open(file_name, "w") as file_handler:
183 file_handler.write(",".join(header) + "\n")
184 for item in table_lst:
185 file_handler.write(",".join(item) + "\n")
187 logging.info(" Done.")
def _tpc_modify_test_name(test_name):
    """Normalize a test name for cross-release/table comparison keys.

    Strips the rate-search suffixes (-ndrpdrdisc, -ndrpdr, -pdrdisc,
    -ndrdisc, -pdr, -ndr), collapses thread/core tags (e.g. "2t1c") into
    core-only tags ("1c") and removes the NIC token matched by REGEX_NIC,
    so equivalent tests from different suite flavours map to one key.

    :param test_name: Original test name.
    :type test_name: str
    :returns: Normalized test name.
    :rtype: str
    """
    test_name_mod = test_name.replace("-ndrpdrdisc", ""). \
        replace("-ndrpdr", "").replace("-pdrdisc", ""). \
        replace("-ndrdisc", "").replace("-pdr", ""). \
        replace("-ndr", ""). \
        replace("1t1c", "1c").replace("2t1c", "1c"). \
        replace("2t2c", "2c").replace("4t2c", "2c"). \
        replace("4t4c", "4c").replace("8t4c", "4c")
    test_name_mod = re.sub(REGEX_NIC, "", test_name_mod)
    # The sampled listing was cut after the re.sub above; the missing line
    # restored here is the function's return statement.
    return test_name_mod
202 def _tpc_modify_displayed_test_name(test_name):
203 return test_name.replace("1t1c", "1c").replace("2t1c", "1c"). \
204 replace("2t2c", "2c").replace("4t2c", "2c"). \
205 replace("4t4c", "4c").replace("8t4c", "4c")
208 def _tpc_insert_data(target, src, include_tests):
210 if include_tests == "MRR":
211 target.append(src["result"]["receive-rate"].avg)
212 elif include_tests == "PDR":
213 target.append(src["throughput"]["PDR"]["LOWER"])
214 elif include_tests == "NDR":
215 target.append(src["throughput"]["NDR"]["LOWER"])
216 except (KeyError, TypeError):
220 def _tpc_sort_table(table):
222 # 1. New in CSIT-XXXX
229 if isinstance(item[-1], str):
230 if "New in CSIT" in item[-1]:
232 elif "See footnote" in item[-1]:
235 tbl_delta.append(item)
238 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
239 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
240 tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
241 tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
243 # Put the tables together:
245 table.extend(tbl_new)
246 table.extend(tbl_see)
247 table.extend(tbl_delta)
# NOTE(review): preserved byte-for-byte from a sampled, whitespace-mangled
# listing — original line numbers are fused into the code and lines are
# missing wherever the embedded numbering jumps (loop bodies, try/except
# frames, list initialisations).  Restore from the original file before
# editing; comments below only annotate what is visible.
252 def table_performance_comparison(table, input_data):
253 """Generate the table(s) with algorithm: table_performance_comparison
254 specified in the specification file.
256 :param table: Table to generate.
257 :param input_data: Data to process.
258 :type table: pandas.Series
259 :type input_data: InputData
262 logging.info(" Generating the table {0} ...".
263 format(table.get("title", "")))
266 logging.info(" Creating the data set for the {0} '{1}'.".
267 format(table.get("type", ""), table.get("title", "")))
268 data = input_data.filter_data(table, continue_on_error=True)
270 # Prepare the header of the tables
272 header = ["Test case", ]
274 if table["include-tests"] == "MRR":
275 hdr_param = "Rec Rate"
279 history = table.get("history", None)
283 ["{0} {1} [Mpps]".format(item["title"], hdr_param),
284 "{0} Stdev [Mpps]".format(item["title"])])
286 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
287 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
288 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
289 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
291 header_str = ",".join(header) + "\n"
292 except (AttributeError, KeyError) as err:
293 logging.error("The model is invalid, missing parameter: {0}".
297 # Prepare data to the table:
# Pass 1: collect reference ("ref-data") results keyed by normalized name.
299 for job, builds in table["reference"]["data"].items():
300 topo = "2n-skx" if "2n-skx" in job else ""
302 for tst_name, tst_data in data[job][str(build)].iteritems():
303 tst_name_mod = _tpc_modify_test_name(tst_name)
304 if "across topologies" in table["title"].lower():
305 tst_name_mod = tst_name_mod.replace("2n1l-", "")
306 if tbl_dict.get(tst_name_mod, None) is None:
307 groups = re.search(REGEX_NIC, tst_data["parent"])
308 nic = groups.group(0) if groups else ""
309 name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
311 if "across testbeds" in table["title"].lower() or \
312 "across topologies" in table["title"].lower():
313 name = _tpc_modify_displayed_test_name(name)
314 tbl_dict[tst_name_mod] = {"name": name,
317 _tpc_insert_data(target=tbl_dict[tst_name_mod]["ref-data"],
319 include_tests=table["include-tests"])
# Pass 2: collect compare ("cmp-data") results.
321 for job, builds in table["compare"]["data"].items():
323 for tst_name, tst_data in data[job][str(build)].iteritems():
324 tst_name_mod = _tpc_modify_test_name(tst_name)
325 if "across topologies" in table["title"].lower():
326 tst_name_mod = tst_name_mod.replace("2n1l-", "")
327 if tbl_dict.get(tst_name_mod, None) is None:
328 groups = re.search(REGEX_NIC, tst_data["parent"])
329 nic = groups.group(0) if groups else ""
330 name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
332 if "across testbeds" in table["title"].lower() or \
333 "across topologies" in table["title"].lower():
334 name = _tpc_modify_displayed_test_name(name)
335 tbl_dict[tst_name_mod] = {"name": name,
338 _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
340 include_tests=table["include-tests"])
# Optional replacement data set overrides compare results per test.
342 replacement = table["compare"].get("data-replacement", None)
344 create_new_list = True
345 rpl_data = input_data.filter_data(
346 table, data=replacement, continue_on_error=True)
347 for job, builds in replacement.items():
349 for tst_name, tst_data in rpl_data[job][str(build)].iteritems():
350 tst_name_mod = _tpc_modify_test_name(tst_name)
351 if "across topologies" in table["title"].lower():
352 tst_name_mod = tst_name_mod.replace("2n1l-", "")
353 if tbl_dict.get(tst_name_mod, None) is None:
354 name = "{0}".format("-".join(tst_data["name"].
356 if "across testbeds" in table["title"].lower() or \
357 "across topologies" in table["title"].lower():
358 name = _tpc_modify_displayed_test_name(name)
359 tbl_dict[tst_name_mod] = {"name": name,
# On first replacement hit, discard previously collected cmp-data.
363 create_new_list = False
364 tbl_dict[tst_name_mod]["cmp-data"] = list()
366 _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
368 include_tests=table["include-tests"])
# Pass 3: optional historical columns, keyed by the history item's title.
372 for job, builds in item["data"].items():
374 for tst_name, tst_data in data[job][str(build)].iteritems():
375 tst_name_mod = _tpc_modify_test_name(tst_name)
376 if "across topologies" in table["title"].lower():
377 tst_name_mod = tst_name_mod.replace("2n1l-", "")
378 if tbl_dict.get(tst_name_mod, None) is None:
380 if tbl_dict[tst_name_mod].get("history", None) is None:
381 tbl_dict[tst_name_mod]["history"] = OrderedDict()
382 if tbl_dict[tst_name_mod]["history"].get(item["title"],
384 tbl_dict[tst_name_mod]["history"][item["title"]] = \
387 # TODO: Re-work when NDRPDRDISC tests are not used
388 if table["include-tests"] == "MRR":
389 tbl_dict[tst_name_mod]["history"][item["title"
390 ]].append(tst_data["result"]["receive-rate"].
392 elif table["include-tests"] == "PDR":
393 if tst_data["type"] == "PDR":
394 tbl_dict[tst_name_mod]["history"][
396 append(tst_data["throughput"]["value"])
397 elif tst_data["type"] == "NDRPDR":
398 tbl_dict[tst_name_mod]["history"][item[
399 "title"]].append(tst_data["throughput"][
401 elif table["include-tests"] == "NDR":
402 if tst_data["type"] == "NDR":
403 tbl_dict[tst_name_mod]["history"][
405 append(tst_data["throughput"]["value"])
406 elif tst_data["type"] == "NDRPDR":
407 tbl_dict[tst_name_mod]["history"][item[
408 "title"]].append(tst_data["throughput"][
412 except (TypeError, KeyError):
# Build rows: name, [history mean/stdev...], ref mean/stdev, cmp
# mean/stdev (Mpps), then delta [%] or an annotation string.
417 for tst_name in tbl_dict.keys():
418 item = [tbl_dict[tst_name]["name"], ]
420 if tbl_dict[tst_name].get("history", None) is not None:
421 for hist_data in tbl_dict[tst_name]["history"].values():
423 item.append(round(mean(hist_data) / 1000000, 2))
424 item.append(round(stdev(hist_data) / 1000000, 2))
426 item.extend(["Not tested", "Not tested"])
428 item.extend(["Not tested", "Not tested"])
429 data_t = tbl_dict[tst_name]["ref-data"]
431 item.append(round(mean(data_t) / 1000000, 2))
432 item.append(round(stdev(data_t) / 1000000, 2))
434 item.extend(["Not tested", "Not tested"])
435 data_t = tbl_dict[tst_name]["cmp-data"]
437 item.append(round(mean(data_t) / 1000000, 2))
438 item.append(round(stdev(data_t) / 1000000, 2))
440 item.extend(["Not tested", "Not tested"])
441 if item[-2] == "Not tested":
443 elif item[-4] == "Not tested":
444 item.append("New in CSIT-1908")
445 elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
446 item.append("See footnote [1]")
449 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
450 if (len(item) == len(header)) and (item[-3] != "Not tested"):
453 tbl_lst = _tpc_sort_table(tbl_lst)
455 # Generate csv tables:
456 csv_file = "{0}.csv".format(table["output-file"])
457 with open(csv_file, "w") as file_handler:
458 file_handler.write(header_str)
460 file_handler.write(",".join([str(item) for item in test]) + "\n")
462 txt_file_name = "{0}.txt".format(table["output-file"])
463 convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Footnote appended to the pretty-text output for annotated rows.
466 with open(txt_file_name, 'a') as txt_file:
467 txt_file.writelines([
469 "[1] CSIT-1908 changed test methodology of dot1q tests in "
470 "2-node testbeds, dot1q encapsulation is now used on both "
472 " Previously dot1q was used only on a single link with the "
473 "other link carrying untagged Ethernet frames. This changes "
475 " in slightly lower throughput in CSIT-1908 for these "
476 "tests. See release notes."
# NOTE(review): preserved byte-for-byte from a sampled, whitespace-mangled
# listing (fused line numbers, missing lines at numbering jumps).  This is
# a near-duplicate of table_performance_comparison above, additionally
# filtering each pass by a NIC tag — a candidate for consolidation once
# the file is restored.
480 def table_performance_comparison_nic(table, input_data):
481 """Generate the table(s) with algorithm: table_performance_comparison
482 specified in the specification file.
484 :param table: Table to generate.
485 :param input_data: Data to process.
486 :type table: pandas.Series
487 :type input_data: InputData
490 logging.info(" Generating the table {0} ...".
491 format(table.get("title", "")))
494 logging.info(" Creating the data set for the {0} '{1}'.".
495 format(table.get("type", ""), table.get("title", "")))
496 data = input_data.filter_data(table, continue_on_error=True)
498 # Prepare the header of the tables
500 header = ["Test case", ]
502 if table["include-tests"] == "MRR":
503 hdr_param = "Rec Rate"
507 history = table.get("history", None)
511 ["{0} {1} [Mpps]".format(item["title"], hdr_param),
512 "{0} Stdev [Mpps]".format(item["title"])])
514 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
515 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
516 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
517 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
519 header_str = ",".join(header) + "\n"
520 except (AttributeError, KeyError) as err:
521 logging.error("The model is invalid, missing parameter: {0}".
525 # Prepare data to the table:
527 for job, builds in table["reference"]["data"].items():
528 topo = "2n-skx" if "2n-skx" in job else ""
530 for tst_name, tst_data in data[job][str(build)].iteritems():
# Skip tests not run on the reference NIC.
531 if table["reference"]["nic"] not in tst_data["tags"]:
533 tst_name_mod = _tpc_modify_test_name(tst_name)
534 if "across topologies" in table["title"].lower():
535 tst_name_mod = tst_name_mod.replace("2n1l-", "")
536 if tbl_dict.get(tst_name_mod, None) is None:
537 name = "{0}".format("-".join(tst_data["name"].
539 if "across testbeds" in table["title"].lower() or \
540 "across topologies" in table["title"].lower():
541 name = _tpc_modify_displayed_test_name(name)
542 tbl_dict[tst_name_mod] = {"name": name,
545 _tpc_insert_data(target=tbl_dict[tst_name_mod]["ref-data"],
547 include_tests=table["include-tests"])
549 for job, builds in table["compare"]["data"].items():
551 for tst_name, tst_data in data[job][str(build)].iteritems():
552 if table["compare"]["nic"] not in tst_data["tags"]:
554 tst_name_mod = _tpc_modify_test_name(tst_name)
555 if "across topologies" in table["title"].lower():
556 tst_name_mod = tst_name_mod.replace("2n1l-", "")
557 if tbl_dict.get(tst_name_mod, None) is None:
558 name = "{0}".format("-".join(tst_data["name"].
560 if "across testbeds" in table["title"].lower() or \
561 "across topologies" in table["title"].lower():
562 name = _tpc_modify_displayed_test_name(name)
563 tbl_dict[tst_name_mod] = {"name": name,
566 _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
568 include_tests=table["include-tests"])
570 replacement = table["compare"].get("data-replacement", None)
572 create_new_list = True
573 rpl_data = input_data.filter_data(
574 table, data=replacement, continue_on_error=True)
575 for job, builds in replacement.items():
577 for tst_name, tst_data in rpl_data[job][str(build)].iteritems():
578 if table["compare"]["nic"] not in tst_data["tags"]:
580 tst_name_mod = _tpc_modify_test_name(tst_name)
581 if "across topologies" in table["title"].lower():
582 tst_name_mod = tst_name_mod.replace("2n1l-", "")
583 if tbl_dict.get(tst_name_mod, None) is None:
584 name = "{0}".format("-".join(tst_data["name"].
586 if "across testbeds" in table["title"].lower() or \
587 "across topologies" in table["title"].lower():
588 name = _tpc_modify_displayed_test_name(name)
589 tbl_dict[tst_name_mod] = {"name": name,
593 create_new_list = False
594 tbl_dict[tst_name_mod]["cmp-data"] = list()
596 _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
598 include_tests=table["include-tests"])
602 for job, builds in item["data"].items():
604 for tst_name, tst_data in data[job][str(build)].iteritems():
605 if item["nic"] not in tst_data["tags"]:
607 tst_name_mod = _tpc_modify_test_name(tst_name)
608 if "across topologies" in table["title"].lower():
609 tst_name_mod = tst_name_mod.replace("2n1l-", "")
610 if tbl_dict.get(tst_name_mod, None) is None:
612 if tbl_dict[tst_name_mod].get("history", None) is None:
613 tbl_dict[tst_name_mod]["history"] = OrderedDict()
614 if tbl_dict[tst_name_mod]["history"].get(item["title"],
616 tbl_dict[tst_name_mod]["history"][item["title"]] = \
619 # TODO: Re-work when NDRPDRDISC tests are not used
620 if table["include-tests"] == "MRR":
621 tbl_dict[tst_name_mod]["history"][item["title"
622 ]].append(tst_data["result"]["receive-rate"].
624 elif table["include-tests"] == "PDR":
625 if tst_data["type"] == "PDR":
626 tbl_dict[tst_name_mod]["history"][
628 append(tst_data["throughput"]["value"])
629 elif tst_data["type"] == "NDRPDR":
630 tbl_dict[tst_name_mod]["history"][item[
631 "title"]].append(tst_data["throughput"][
633 elif table["include-tests"] == "NDR":
634 if tst_data["type"] == "NDR":
635 tbl_dict[tst_name_mod]["history"][
637 append(tst_data["throughput"]["value"])
638 elif tst_data["type"] == "NDRPDR":
639 tbl_dict[tst_name_mod]["history"][item[
640 "title"]].append(tst_data["throughput"][
644 except (TypeError, KeyError):
649 for tst_name in tbl_dict.keys():
650 item = [tbl_dict[tst_name]["name"], ]
652 if tbl_dict[tst_name].get("history", None) is not None:
653 for hist_data in tbl_dict[tst_name]["history"].values():
655 item.append(round(mean(hist_data) / 1000000, 2))
656 item.append(round(stdev(hist_data) / 1000000, 2))
658 item.extend(["Not tested", "Not tested"])
660 item.extend(["Not tested", "Not tested"])
661 data_t = tbl_dict[tst_name]["ref-data"]
663 item.append(round(mean(data_t) / 1000000, 2))
664 item.append(round(stdev(data_t) / 1000000, 2))
666 item.extend(["Not tested", "Not tested"])
667 data_t = tbl_dict[tst_name]["cmp-data"]
669 item.append(round(mean(data_t) / 1000000, 2))
670 item.append(round(stdev(data_t) / 1000000, 2))
672 item.extend(["Not tested", "Not tested"])
673 if item[-2] == "Not tested":
675 elif item[-4] == "Not tested":
676 item.append("New in CSIT-1908")
677 elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
678 item.append("See footnote [1]")
681 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
682 if (len(item) == len(header)) and (item[-3] != "Not tested"):
685 tbl_lst = _tpc_sort_table(tbl_lst)
687 # Generate csv tables:
688 csv_file = "{0}.csv".format(table["output-file"])
689 with open(csv_file, "w") as file_handler:
690 file_handler.write(header_str)
692 file_handler.write(",".join([str(item) for item in test]) + "\n")
694 txt_file_name = "{0}.txt".format(table["output-file"])
695 convert_csv_to_pretty_txt(csv_file, txt_file_name)
698 with open(txt_file_name, 'a') as txt_file:
699 txt_file.writelines([
701 "[1] CSIT-1908 changed test methodology of dot1q tests in "
702 "2-node testbeds, dot1q encapsulation is now used on both "
704 " Previously dot1q was used only on a single link with the "
705 "other link carrying untagged Ethernet frames. This changes "
707 " in slightly lower throughput in CSIT-1908 for these "
708 "tests. See release notes."
# NOTE(review): preserved byte-for-byte from a sampled, whitespace-mangled
# listing (fused line numbers; lines missing where the embedded numbering
# jumps).  Compares the same tests run on two NICs (reference vs compare
# tag) and emits a delta column.
712 def table_nics_comparison(table, input_data):
713 """Generate the table(s) with algorithm: table_nics_comparison
714 specified in the specification file.
716 :param table: Table to generate.
717 :param input_data: Data to process.
718 :type table: pandas.Series
719 :type input_data: InputData
722 logging.info(" Generating the table {0} ...".
723 format(table.get("title", "")))
726 logging.info(" Creating the data set for the {0} '{1}'.".
727 format(table.get("type", ""), table.get("title", "")))
728 data = input_data.filter_data(table, continue_on_error=True)
730 # Prepare the header of the tables
732 header = ["Test case", ]
734 if table["include-tests"] == "MRR":
735 hdr_param = "Rec Rate"
740 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
741 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
742 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
743 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
745 header_str = ",".join(header) + "\n"
746 except (AttributeError, KeyError) as err:
747 logging.error("The model is invalid, missing parameter: {0}".
751 # Prepare data to the table:
753 for job, builds in table["data"].items():
755 for tst_name, tst_data in data[job][str(build)].iteritems():
# NOTE(review): inline duplicate of _tpc_modify_test_name — candidate to
# call the helper instead once the file is restored.
756 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
757 replace("-ndrpdr", "").replace("-pdrdisc", "").\
758 replace("-ndrdisc", "").replace("-pdr", "").\
759 replace("-ndr", "").\
760 replace("1t1c", "1c").replace("2t1c", "1c").\
761 replace("2t2c", "2c").replace("4t2c", "2c").\
762 replace("4t4c", "4c").replace("8t4c", "4c")
763 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
764 if tbl_dict.get(tst_name_mod, None) is None:
765 name = "-".join(tst_data["name"].split("-")[:-1])
766 tbl_dict[tst_name_mod] = {"name": name,
770 if table["include-tests"] == "MRR":
771 result = tst_data["result"]["receive-rate"].avg
772 elif table["include-tests"] == "PDR":
773 result = tst_data["throughput"]["PDR"]["LOWER"]
774 elif table["include-tests"] == "NDR":
775 result = tst_data["throughput"]["NDR"]["LOWER"]
# Route the result by which NIC tag the test carries.
780 if table["reference"]["nic"] in tst_data["tags"]:
781 tbl_dict[tst_name_mod]["ref-data"].append(result)
782 elif table["compare"]["nic"] in tst_data["tags"]:
783 tbl_dict[tst_name_mod]["cmp-data"].append(result)
784 except (TypeError, KeyError) as err:
785 logging.debug("No data for {0}".format(tst_name))
786 logging.debug(repr(err))
787 # No data in output.xml for this test
790 for tst_name in tbl_dict.keys():
791 item = [tbl_dict[tst_name]["name"], ]
792 data_t = tbl_dict[tst_name]["ref-data"]
794 item.append(round(mean(data_t) / 1000000, 2))
795 item.append(round(stdev(data_t) / 1000000, 2))
797 item.extend([None, None])
798 data_t = tbl_dict[tst_name]["cmp-data"]
800 item.append(round(mean(data_t) / 1000000, 2))
801 item.append(round(stdev(data_t) / 1000000, 2))
803 item.extend([None, None])
804 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
805 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
806 if len(item) == len(header):
809 # Sort the table according to the relative change
810 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
812 # Generate csv tables:
813 csv_file = "{0}.csv".format(table["output-file"])
814 with open(csv_file, "w") as file_handler:
815 file_handler.write(header_str)
817 file_handler.write(",".join([str(item) for item in test]) + "\n")
819 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
# NOTE(review): preserved byte-for-byte from a sampled, whitespace-mangled
# listing (fused line numbers; lines missing where the embedded numbering
# jumps).  Pairs SOAK results (compare) with NDR/PDR/MRR results
# (reference) for the same tests and reports delta with its stdev.
822 def table_soak_vs_ndr(table, input_data):
823 """Generate the table(s) with algorithm: table_soak_vs_ndr
824 specified in the specification file.
826 :param table: Table to generate.
827 :param input_data: Data to process.
828 :type table: pandas.Series
829 :type input_data: InputData
832 logging.info(" Generating the table {0} ...".
833 format(table.get("title", "")))
836 logging.info(" Creating the data set for the {0} '{1}'.".
837 format(table.get("type", ""), table.get("title", "")))
838 data = input_data.filter_data(table, continue_on_error=True)
840 # Prepare the header of the table
844 "{0} Thput [Mpps]".format(table["reference"]["title"]),
845 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
846 "{0} Thput [Mpps]".format(table["compare"]["title"]),
847 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
848 "Delta [%]", "Stdev of delta [%]"]
849 header_str = ",".join(header) + "\n"
850 except (AttributeError, KeyError) as err:
851 logging.error("The model is invalid, missing parameter: {0}".
855 # Create a list of available SOAK test results:
857 for job, builds in table["compare"]["data"].items():
859 for tst_name, tst_data in data[job][str(build)].iteritems():
860 if tst_data["type"] == "SOAK":
861 tst_name_mod = tst_name.replace("-soak", "")
862 if tbl_dict.get(tst_name_mod, None) is None:
863 groups = re.search(REGEX_NIC, tst_data["parent"])
864 nic = groups.group(0) if groups else ""
865 name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
867 tbl_dict[tst_name_mod] = {
873 tbl_dict[tst_name_mod]["cmp-data"].append(
874 tst_data["throughput"]["LOWER"])
875 except (KeyError, TypeError):
877 tests_lst = tbl_dict.keys()
879 # Add corresponding NDR test results:
880 for job, builds in table["reference"]["data"].items():
882 for tst_name, tst_data in data[job][str(build)].iteritems():
883 tst_name_mod = tst_name.replace("-ndrpdr", "").\
# Only keep reference results for tests that also have SOAK data.
885 if tst_name_mod in tests_lst:
887 if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
888 if table["include-tests"] == "MRR":
889 result = tst_data["result"]["receive-rate"].avg
890 elif table["include-tests"] == "PDR":
891 result = tst_data["throughput"]["PDR"]["LOWER"]
892 elif table["include-tests"] == "NDR":
893 result = tst_data["throughput"]["NDR"]["LOWER"]
896 if result is not None:
897 tbl_dict[tst_name_mod]["ref-data"].append(
899 except (KeyError, TypeError):
903 for tst_name in tbl_dict.keys():
904 item = [tbl_dict[tst_name]["name"], ]
905 data_r = tbl_dict[tst_name]["ref-data"]
907 data_r_mean = mean(data_r)
908 item.append(round(data_r_mean / 1000000, 2))
909 data_r_stdev = stdev(data_r)
910 item.append(round(data_r_stdev / 1000000, 2))
914 item.extend([None, None])
915 data_c = tbl_dict[tst_name]["cmp-data"]
917 data_c_mean = mean(data_c)
918 item.append(round(data_c_mean / 1000000, 2))
919 data_c_stdev = stdev(data_c)
920 item.append(round(data_c_stdev / 1000000, 2))
924 item.extend([None, None])
925 if data_r_mean and data_c_mean:
926 delta, d_stdev = relative_change_stdev(
927 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
928 item.append(round(delta, 2))
929 item.append(round(d_stdev, 2))
932 # Sort the table according to the relative change
933 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
935 # Generate csv tables:
936 csv_file = "{0}.csv".format(table["output-file"])
937 with open(csv_file, "w") as file_handler:
938 file_handler.write(header_str)
940 file_handler.write(",".join([str(item) for item in test]) + "\n")
942 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
# NOTE(review): preserved byte-for-byte from a sampled, whitespace-mangled
# listing (fused line numbers; lines missing where the embedded numbering
# jumps).  Builds the trending dashboard: per-test trend value, short/long
# term change, and regression/progression counts from classify_anomalies.
945 def table_performance_trending_dashboard(table, input_data):
946 """Generate the table(s) with algorithm:
947 table_performance_trending_dashboard
948 specified in the specification file.
950 :param table: Table to generate.
951 :param input_data: Data to process.
952 :type table: pandas.Series
953 :type input_data: InputData
956 logging.info(" Generating the table {0} ...".
957 format(table.get("title", "")))
960 logging.info(" Creating the data set for the {0} '{1}'.".
961 format(table.get("type", ""), table.get("title", "")))
962 data = input_data.filter_data(table, continue_on_error=True)
964 # Prepare the header of the tables
965 header = ["Test Case",
967 "Short-Term Change [%]",
968 "Long-Term Change [%]",
972 header_str = ",".join(header) + "\n"
974 # Prepare data to the table:
976 for job, builds in table["data"].items():
978 for tst_name, tst_data in data[job][str(build)].iteritems():
979 if tst_name.lower() in table.get("ignore-list", list()):
981 if tbl_dict.get(tst_name, None) is None:
982 groups = re.search(REGEX_NIC, tst_data["parent"])
985 nic = groups.group(0)
986 tbl_dict[tst_name] = {
987 "name": "{0}-{1}".format(nic, tst_data["name"]),
988 "data": OrderedDict()}
990 tbl_dict[tst_name]["data"][str(build)] = \
991 tst_data["result"]["receive-rate"]
992 except (TypeError, KeyError):
993 pass # No data in output.xml for this test
996 for tst_name in tbl_dict.keys():
997 data_t = tbl_dict[tst_name]["data"]
1001 classification_lst, avgs = classify_anomalies(data_t)
# Short-term window vs long-term window, both clamped to available data.
1003 win_size = min(len(data_t), table["window"])
1004 long_win_size = min(len(data_t), table["long-trend-window"])
1008 [x for x in avgs[-long_win_size:-win_size]
1013 avg_week_ago = avgs[max(-win_size, -len(avgs))]
1015 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1016 rel_change_last = nan
1018 rel_change_last = round(
1019 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1021 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1022 rel_change_long = nan
1024 rel_change_long = round(
1025 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1027 if classification_lst:
1028 if isnan(rel_change_last) and isnan(rel_change_long):
1030 if (isnan(last_avg) or
1031 isnan(rel_change_last) or
1032 isnan(rel_change_long)):
1035 [tbl_dict[tst_name]["name"],
1036 round(last_avg / 1000000, 2),
1039 classification_lst[-win_size:].count("regression"),
1040 classification_lst[-win_size:].count("progression")])
1042 tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-pass ordering: most regressions first, then most
# progressions, then by short-term change (column 2).
1045 for nrr in range(table["window"], -1, -1):
1046 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1047 for nrp in range(table["window"], -1, -1):
1048 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1049 tbl_out.sort(key=lambda rel: rel[2])
1050 tbl_sorted.extend(tbl_out)
1052 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1054 logging.info(" Writing file: '{0}'".format(file_name))
1055 with open(file_name, "w") as file_handler:
1056 file_handler.write(header_str)
1057 for test in tbl_sorted:
1058 file_handler.write(",".join([str(item) for item in test]) + '\n')
1060 txt_file_name = "{0}.txt".format(table["output-file"])
1061 logging.info(" Writing file: '{0}'".format(txt_file_name))
1062 convert_csv_to_pretty_txt(file_name, txt_file_name)
def _generate_url(base, testbed, test_name):
    """Generate URL to a trending plot from the name of the test case.

    The URL is composed from the base path, a page name derived from the
    test technology (link bonding, memif, vhost, ipsec, tunnels, ...),
    the testbed, the NIC, the frame size and an anchor built from frame
    size, core count and feature suffix.

    :param base: The base part of URL common to all test cases.
    :param testbed: The testbed used for testing.
    :param test_name: The name of the test case.
    :type base: str
    :type testbed: str
    :type test_name: str
    :returns: The URL to the plot with the trending data for the given test
        case.
    :rtype: str
    """

    # Select the trending page (file_name) and, where applicable, a feature
    # suffix by matching technology keywords in the test name.
    # NOTE(review): several branches below show no visible assignment in
    # this view - confirm against the full source file.
    if "lbdpdk" in test_name or "lbvpp" in test_name:
        file_name = "link_bonding"

    elif "114b" in test_name and "vhost" in test_name:

    elif "testpmd" in test_name or "l3fwd" in test_name:

    elif "memif" in test_name:
        file_name = "container_memif"

    elif "srv6" in test_name:

    elif "vhost" in test_name:
        if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
            file_name = "vm_vhost_l2"
            if "114b" in test_name:
            elif "l2xcbase" in test_name and "x520" in test_name:
                feature = "-base-l2xc"
            elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
                feature = "-base-l2bd"
        elif "ip4base" in test_name:
            file_name = "vm_vhost_ip4"

    elif "ipsecbasetnlsw" in test_name:
        file_name = "ipsecsw"
        feature = "-base-scale"

    elif "ipsec" in test_name:
        feature = "-base-scale"
        # Hardware-assisted vs software IPsec pages.
        if "hw-" in test_name:
            file_name = "ipsechw"
        elif "sw-" in test_name:
            file_name = "ipsecsw"

    elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
        file_name = "ip4_tunnels"

    elif "ip4base" in test_name or "ip4scale" in test_name:
        if "xl710" in test_name:
            feature = "-base-scale-features"
        elif "iacl" in test_name:
            feature = "-features-iacl"
        elif "oacl" in test_name:
            feature = "-features-oacl"
        elif "snat" in test_name or "cop" in test_name:
            feature = "-features"
            # NOTE(review): this looks like the body of an elided 'else:'.
            feature = "-base-scale"

    elif "ip6base" in test_name or "ip6scale" in test_name:
        feature = "-base-scale"

    # NOTE(review): "l2dbbasemaclrn"/"l2dbscale" look like typos for
    # "l2bd..." - confirm whether such test names really exist.
    elif "l2xcbase" in test_name or "l2xcscale" in test_name \
            or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
            or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
        if "macip" in test_name:
            feature = "-features-macip"
        elif "iacl" in test_name:
            feature = "-features-iacl"
        elif "oacl" in test_name:
            feature = "-features-oacl"
            # NOTE(review): likely the body of an elided 'else:'.
            feature = "-base-scale"

    # NIC selection (assigned values are not visible in this view).
    if "x520" in test_name:
    elif "x710" in test_name:
    elif "xl710" in test_name:
    elif "xxv710" in test_name:
    elif "vic1227" in test_name:
    elif "vic1385" in test_name:

    # Frame size selection (assigned values are not visible in this view).
    if "64b" in test_name:
    elif "78b" in test_name:
    elif "imix" in test_name:
    elif "9000b" in test_name:
    elif "1518b" in test_name:
    elif "114b" in test_name:
    anchor += framesize + '-'

    # Thread/core combination (assigned values are not visible here).
    if "1t1c" in test_name:
    elif "2t2c" in test_name:
    elif "4t4c" in test_name:
    elif "2t1c" in test_name:
    elif "4t2c" in test_name:
    elif "8t4c" in test_name:

    # Compose the final URL; "-int"/"-tnl" are stripped from the feature
    # used in the path part but kept in the trailing anchor part.
    return url + file_name + '-' + testbed + '-' + nic + framesize + \
        feature.replace("-int", "").replace("-tnl", "") + anchor + feature
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the CSV dashboard produced earlier, converts it into an HTML
    table (with links to the trending plots in the first column) and writes
    it out as a reST ".. raw:: html" block.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # The testbed name is needed to build links to the trending plots.
    testbed = table.get("testbed", None)
    # NOTE(review): the guard for this error branch is not visible in this
    # view; presumably it fires (and returns) when testbed is None.
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the input CSV dashboard.
    # NOTE(review): the 'try:' and the branch opening before the
    # "input file is not defined" warning are not visible in this view.
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
        logging.warning("The input file is not defined.")
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))

    # Root element of the generated HTML table.
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row: first column left-aligned, the rest centered.
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))

    # Data rows: two alternating shades per classification so adjacent rows
    # of the same class remain distinguishable.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        # NOTE(review): the conditions selecting the row color are not
        # visible here; "normal" is presumably the default.
            color = "regression"
            color = "progression"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # First column: wrap the test name in a link to its trending
            # plot (guard for c_idx == 0 not visible in this view).
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))

    # Write the table as an embedded raw-HTML reST block.
    # NOTE(review): the enclosing 'try:' / KeyError branch opening are not
    # visible in this view.
        with open(table["output-file"], 'w') as html_file:
            logging.info(" Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
        logging.warning("The output file is not defined.")
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    Writes, per job/build, the build number, the tested version and one
    line per failed test ("<nic>-<test name>").

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        # Fix: logging.warn() is a deprecated alias; use logging.warning().
        logging.warning(" No data for the {0} '{1}'.".
                        format(table.get("type", ""), table.get("title", "")))
        # Fix: bail out early - the loops below cannot work without data.
        return

    # Fix: tbl_list was used below without a visible initialization.
    tbl_list = list()
    for job, builds in table["data"].items():
        for build in builds:
            try:
                version = input_data.metadata(job, build).get("version", "")
            except KeyError:
                logging.error("Data for {job}: {build} is not present.".
                              format(job=job, build=build))
                return
            tbl_list.append(build)
            tbl_list.append(version)
            for tst_name, tst_data in data[job][build].iteritems():
                # Only failed tests are reported.
                if tst_data["status"] != "FAIL":
                    continue
                groups = re.search(REGEX_NIC, tst_data["parent"])
                # Fix: guard against a non-matching parent name before
                # calling groups.group(0).
                if not groups:
                    continue
                nic = groups.group(0)
                tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info(" Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        for test in tbl_list:
            file_handler.write(test + '\n')
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Produces a CSV (and a pretty-printed text twin) listing failing tests
    with the number of failures inside a sliding time window and the
    details of the most recent failure.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # Only builds generated within this window (default 7 days) count.
    timeperiod = timedelta(int(table.get("window", 7)))

    # NOTE(review): initialization of tbl_dict and 'now' is not visible in
    # this view; tbl_dict is used as a dict of per-test entries below.
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][build].iteritems():
                # Skip tests listed in the specification's ignore list.
                if tst_name.lower() in table.get("ignore-list", list()):
                # First occurrence of this test: derive the NIC from the
                # parent suite name and create the entry.
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                # Record this build only if it was generated within the
                # configured time window.
                # NOTE(review): the matching 'try:' for the except below is
                # not visible in this view.
                    generated = input_data.metadata(job, build).\
                        get("generated", "")
                    then = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name]["data"][build] = (
                            input_data.metadata(job, build).get("version", ""),
                except (TypeError, KeyError) as err:
                    # Best-effort: malformed metadata is logged and skipped.
                    logging.warning("tst_name: {} - err: {}".
                                    format(tst_name, repr(err)))

    # Count failures per test and remember the details (date, VPP build,
    # CSIT build) of the most recent failure.
    # NOTE(review): initialization of tbl_lst / fails_nr / max_fails is not
    # visible in this view.
    for tst_data in tbl_dict.values():
        for val in tst_data["data"].values():
            if val[0] == "FAIL":
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        # Track the global maximum of failures for the bucketing below.
        max_fails = fails_nr if fails_nr > max_fails else max_fails
        tbl_lst.append([tst_data["name"],
                        "mrr-daily-build-{0}".format(fails_last_csit)])

    # Sort rows by column 2 (descending), then stable-bucket them by the
    # number of failures, highest first.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    # Write the CSV output and convert it to a pretty text table.
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info(" Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info(" Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV produced by table_failed_tests, converts it into an HTML
    table (with links to the trending plots in the first column) and writes
    it out as a reST ".. raw:: html" block.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # The testbed name is needed to build links to the trending plots.
    testbed = table.get("testbed", None)
    # NOTE(review): the guard for this error branch is not visible in this
    # view; presumably it fires (and returns) when testbed is None.
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the input CSV file.
    # NOTE(review): the 'try:' and the branch opening before the
    # "input file is not defined" warning are not visible in this view.
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
        logging.warning("The input file is not defined.")
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))

    # Root element of the generated HTML table.
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row: first column left-aligned, the rest centered.
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))

    # Data rows with two alternating background shades.
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))

        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # First column: wrap the test name in a link to its trending
            # plot (guard for c_idx == 0 not visible in this view).
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))

    # Write the table as an embedded raw-HTML reST block.
    # NOTE(review): the enclosing 'try:' / KeyError branch opening are not
    # visible in this view.
        with open(table["output-file"], 'w') as html_file:
            logging.info(" Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
        logging.warning("The output file is not defined.")