1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30 convert_csv_to_pretty_txt, relative_change_stdev
# Matches a NIC designation embedded in test/suite names, e.g. "10ge2p1x520"
# (speed, "ge", port count, "p", slot, NIC model).
REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Dispatches each table to the function named by its "algorithm" key.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """
    logging.info("Generating the tables ...")
    for table in spec.tables:
            # NOTE(review): dispatch uses eval() on a spec-provided name;
            # the specification file is treated as trusted input here.
            eval(table["algorithm"])(table, data)
        except NameError as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    Writes one CSV file per suite, named
    "<output-file>_<suite-name><output-file-ext>".

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    for column in table["columns"]:
        # CSV-escape embedded double quotes in the column title.
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification; only the first job/build pair is used.
    job = table["data"].keys()[0]  # Python 2: keys() returns a list
    build = str(table["data"][job][0])
    suites = input_data.suites(job, build)
    logging.error(" No data available. The table will not be generated.")
    for suite_longname, suite in suites.iteritems():
        suite_name = suite["name"]
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                for column in table["columns"]:
                    # Column spec "data" is e.g. "result <field>"; use the
                    # field name after the first space. Escape quotes for CSV.
                    col_data = str(data[job][build][test][column["data"].
                                   split(" ")[1]]).replace('"', '""')
                    if column["data"].split(" ")[1] in ("conf-history",
                        col_data = replace(col_data, " |br| ", "",
                        col_data = " |prein| {0} |preout| ".\
                            format(col_data[:-5])
                    row_lst.append('"{0}"'.format(col_data))
                    row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info(" Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in table_lst:
                file_handler.write(",".join(item) + "\n")

    logging.info(" Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Like table_details, but first merges data from all builds into a
    single data set before writing one CSV file per suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data: merge all filtered builds into one data set.
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(
        table, continue_on_error=True, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    for column in table["columns"]:
        # CSV-escape embedded double quotes in the column title.
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.iteritems():
        suite_name = suite["name"]
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                for column in table["columns"]:
                    # Field name is the token after the first space in the
                    # column's "data" spec; escape quotes for CSV.
                    col_data = str(data[test][column["data"].
                                   split(" ")[1]]).replace('"', '""')
                    col_data = replace(col_data, "No Data",
                    if column["data"].split(" ")[1] in ("conf-history",
                        col_data = replace(col_data, " |br| ", "",
                        col_data = " |prein| {0} |preout| ".\
                            format(col_data[:-5])
                    row_lst.append('"{0}"'.format(col_data))
                    row_lst.append('"Not captured"')
                table_lst.append(row_lst)

        # Write the data to file
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info(" Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in table_lst:
                file_handler.write(",".join(item) + "\n")

    logging.info(" Done.")
def _tpc_modify_test_name(test_name):
    # Normalize a test name for cross-run matching: strip the rate-search
    # suffix (-ndrpdrdisc, -ndrpdr, -pdrdisc, -ndrdisc, -pdr, -ndr) and
    # collapse thread/core tags ("4t2c" -> "2c").
    test_name_mod = test_name.replace("-ndrpdrdisc", ""). \
        replace("-ndrpdr", "").replace("-pdrdisc", ""). \
        replace("-ndrdisc", "").replace("-pdr", ""). \
        replace("-ndr", ""). \
        replace("1t1c", "1c").replace("2t1c", "1c"). \
        replace("2t2c", "2c").replace("4t2c", "2c"). \
        replace("4t4c", "4c").replace("8t4c", "4c")
    # Also drop the NIC designation (e.g. "10ge2p1x520").
    test_name_mod = re.sub(REGEX_NIC, "", test_name_mod)
203 def _tpc_modify_displayed_test_name(test_name):
204 return test_name.replace("1t1c", "1c").replace("2t1c", "1c"). \
205 replace("2t2c", "2c").replace("4t2c", "2c"). \
206 replace("4t4c", "4c").replace("8t4c", "4c")
def _tpc_insert_data(target, src, include_tests):
    # Append the measured rate from *src* (one test's result data) to the
    # *target* list, picking the field that matches the table's
    # "include-tests" setting (MRR average, or PDR/NDR lower bound).
    if include_tests == "MRR":
        target.append(src["result"]["receive-rate"].avg)
    elif include_tests == "PDR":
        target.append(src["throughput"]["PDR"]["LOWER"])
    elif include_tests == "NDR":
        target.append(src["throughput"]["NDR"]["LOWER"])
    # Missing/odd result structure means "no data for this test" — ignored.
    except (KeyError, TypeError):
def _tpc_sort_table(table):
    # Split rows into three groups and order them:
    # 1. New in CSIT-XXXX
        if isinstance(item[-1], str):
            if "New in CSIT" in item[-1]:
            elif "See footnote" in item[-1]:
        tbl_delta.append(item)

    # Sort "new" and "footnote" rows by test name; footnote rows again by
    # the last column; remaining rows by delta, biggest change first.
    tbl_new.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)

    # Put the tables together:
    table.extend(tbl_new)
    table.extend(tbl_see)
    table.extend(tbl_delta)
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Builds a CSV (and pretty-printed TXT) comparing "reference" vs
    "compare" runs per test, with optional historical release columns,
    values in Mpps, and a relative-change column.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test case", ]
    if table["include-tests"] == "MRR":
        hdr_param = "Rec Rate"
    # Optional columns for historical releases.
    history = table.get("history", None)
    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
     "{0} Stdev [Mpps]".format(item["title"])])
    ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
     "{0} Stdev [Mpps]".format(table["reference"]["title"]),
     "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
     "{0} Stdev [Mpps]".format(table["compare"]["title"]),
    header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".

    # Prepare data to the table:
    # Reference data:
    for job, builds in table["reference"]["data"].items():
        topo = "2n-skx" if "2n-skx" in job else ""
        for tst_name, tst_data in data[job][str(build)].iteritems():
            tst_name_mod = _tpc_modify_test_name(tst_name)
            if "across topologies" in table["title"].lower():
                tst_name_mod = tst_name_mod.replace("2n1l-", "")
            if tbl_dict.get(tst_name_mod, None) is None:
                # Derive the displayed name from the NIC and test name.
                groups = re.search(REGEX_NIC, tst_data["parent"])
                nic = groups.group(0) if groups else ""
                name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                if "across testbeds" in table["title"].lower() or \
                        "across topologies" in table["title"].lower():
                    name = _tpc_modify_displayed_test_name(name)
                tbl_dict[tst_name_mod] = {"name": name,
            _tpc_insert_data(target=tbl_dict[tst_name_mod]["ref-data"],
                             include_tests=table["include-tests"])

    # Compare data:
    for job, builds in table["compare"]["data"].items():
        for tst_name, tst_data in data[job][str(build)].iteritems():
            tst_name_mod = _tpc_modify_test_name(tst_name)
            if "across topologies" in table["title"].lower():
                tst_name_mod = tst_name_mod.replace("2n1l-", "")
            if tbl_dict.get(tst_name_mod, None) is None:
                groups = re.search(REGEX_NIC, tst_data["parent"])
                nic = groups.group(0) if groups else ""
                name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                if "across testbeds" in table["title"].lower() or \
                        "across topologies" in table["title"].lower():
                    name = _tpc_modify_displayed_test_name(name)
                tbl_dict[tst_name_mod] = {"name": name,
            _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
                             include_tests=table["include-tests"])

    # Optional replacement data overriding the "compare" results:
    replacement = table["compare"].get("data-replacement", None)
    create_new_list = True
    rpl_data = input_data.filter_data(
        table, data=replacement, continue_on_error=True)
    for job, builds in replacement.items():
        for tst_name, tst_data in rpl_data[job][str(build)].iteritems():
            tst_name_mod = _tpc_modify_test_name(tst_name)
            if "across topologies" in table["title"].lower():
                tst_name_mod = tst_name_mod.replace("2n1l-", "")
            if tbl_dict.get(tst_name_mod, None) is None:
                name = "{0}".format("-".join(tst_data["name"].
                if "across testbeds" in table["title"].lower() or \
                        "across topologies" in table["title"].lower():
                    name = _tpc_modify_displayed_test_name(name)
                tbl_dict[tst_name_mod] = {"name": name,
            # First replacement sample discards previously collected
            # compare data for this test.
            create_new_list = False
            tbl_dict[tst_name_mod]["cmp-data"] = list()
            _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
                             include_tests=table["include-tests"])

    # Historical release columns (one per "history" item):
    for job, builds in item["data"].items():
        for tst_name, tst_data in data[job][str(build)].iteritems():
            tst_name_mod = _tpc_modify_test_name(tst_name)
            if "across topologies" in table["title"].lower():
                tst_name_mod = tst_name_mod.replace("2n1l-", "")
            if tbl_dict.get(tst_name_mod, None) is None:
            if tbl_dict[tst_name_mod].get("history", None) is None:
                tbl_dict[tst_name_mod]["history"] = OrderedDict()
            if tbl_dict[tst_name_mod]["history"].get(item["title"],
                tbl_dict[tst_name_mod]["history"][item["title"]] = \
            # TODO: Re-work when NDRPDRDISC tests are not used
            if table["include-tests"] == "MRR":
                tbl_dict[tst_name_mod]["history"][item["title"
                    ]].append(tst_data["result"]["receive-rate"].
            elif table["include-tests"] == "PDR":
                if tst_data["type"] == "PDR":
                    tbl_dict[tst_name_mod]["history"][
                        append(tst_data["throughput"]["value"])
                elif tst_data["type"] == "NDRPDR":
                    tbl_dict[tst_name_mod]["history"][item[
                        "title"]].append(tst_data["throughput"][
            elif table["include-tests"] == "NDR":
                if tst_data["type"] == "NDR":
                    tbl_dict[tst_name_mod]["history"][
                        append(tst_data["throughput"]["value"])
                elif tst_data["type"] == "NDRPDR":
                    tbl_dict[tst_name_mod]["history"][item[
                        "title"]].append(tst_data["throughput"][
            except (TypeError, KeyError):

    # Build the output rows; rates are converted to Mpps (/ 1e6).
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if tbl_dict[tst_name].get("history", None) is not None:
            for hist_data in tbl_dict[tst_name]["history"].values():
                item.append(round(mean(hist_data) / 1000000, 2))
                item.append(round(stdev(hist_data) / 1000000, 2))
                item.extend(["Not tested", "Not tested"])
            item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["ref-data"]
        item.append(round(mean(data_t) / 1000000, 2))
        item.append(round(stdev(data_t) / 1000000, 2))
        item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["cmp-data"]
        item.append(round(mean(data_t) / 1000000, 2))
        item.append(round(stdev(data_t) / 1000000, 2))
        item.extend(["Not tested", "Not tested"])
        # Annotate special rows (no compare data / new tests / footnote).
        if item[-2] == "Not tested":
        elif item[-4] == "Not tested":
            item.append("New in CSIT-1908")
        elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
            item.append("See footnote [1]")
        item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if (len(item) == len(header)) and (item[-3] != "Not tested"):

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        file_handler.write(",".join([str(item) for item in test]) + "\n")

    txt_file_name = "{0}.txt".format(table["output-file"])
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # Append the explanatory footnote to the pretty-printed table.
    with open(txt_file_name, 'a') as txt_file:
        txt_file.writelines([
            "[1] CSIT-1908 changed test methodology of dot1q tests in "
            "2-node testbeds, dot1q encapsulation is now used on both "
            " Previously dot1q was used only on a single link with the "
            "other link carrying untagged Ethernet frames. This changes "
            " in slightly lower throughput in CSIT-1908 for these "
            "tests. See release notes."
def table_performance_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Variant of table_performance_comparison that additionally filters
    each data source by a NIC tag ("nic" key of reference/compare/history).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test case", ]
    if table["include-tests"] == "MRR":
        hdr_param = "Rec Rate"
    # Optional columns for historical releases.
    history = table.get("history", None)
    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
     "{0} Stdev [Mpps]".format(item["title"])])
    ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
     "{0} Stdev [Mpps]".format(table["reference"]["title"]),
     "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
     "{0} Stdev [Mpps]".format(table["compare"]["title"]),
    header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".

    # Prepare data to the table:
    # Reference data, restricted to the reference NIC tag:
    for job, builds in table["reference"]["data"].items():
        topo = "2n-skx" if "2n-skx" in job else ""
        for tst_name, tst_data in data[job][str(build)].iteritems():
            if table["reference"]["nic"] not in tst_data["tags"]:
            tst_name_mod = _tpc_modify_test_name(tst_name)
            if "across topologies" in table["title"].lower():
                tst_name_mod = tst_name_mod.replace("2n1l-", "")
            if tbl_dict.get(tst_name_mod, None) is None:
                name = "{0}".format("-".join(tst_data["name"].
                if "across testbeds" in table["title"].lower() or \
                        "across topologies" in table["title"].lower():
                    name = _tpc_modify_displayed_test_name(name)
                tbl_dict[tst_name_mod] = {"name": name,
            _tpc_insert_data(target=tbl_dict[tst_name_mod]["ref-data"],
                             include_tests=table["include-tests"])

    # Compare data, restricted to the compare NIC tag:
    for job, builds in table["compare"]["data"].items():
        for tst_name, tst_data in data[job][str(build)].iteritems():
            if table["compare"]["nic"] not in tst_data["tags"]:
            tst_name_mod = _tpc_modify_test_name(tst_name)
            if "across topologies" in table["title"].lower():
                tst_name_mod = tst_name_mod.replace("2n1l-", "")
            if tbl_dict.get(tst_name_mod, None) is None:
                name = "{0}".format("-".join(tst_data["name"].
                if "across testbeds" in table["title"].lower() or \
                        "across topologies" in table["title"].lower():
                    name = _tpc_modify_displayed_test_name(name)
                tbl_dict[tst_name_mod] = {"name": name,
            _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
                             include_tests=table["include-tests"])

    # Optional replacement data overriding the "compare" results:
    replacement = table["compare"].get("data-replacement", None)
    create_new_list = True
    rpl_data = input_data.filter_data(
        table, data=replacement, continue_on_error=True)
    for job, builds in replacement.items():
        for tst_name, tst_data in rpl_data[job][str(build)].iteritems():
            if table["compare"]["nic"] not in tst_data["tags"]:
            tst_name_mod = _tpc_modify_test_name(tst_name)
            if "across topologies" in table["title"].lower():
                tst_name_mod = tst_name_mod.replace("2n1l-", "")
            if tbl_dict.get(tst_name_mod, None) is None:
                name = "{0}".format("-".join(tst_data["name"].
                if "across testbeds" in table["title"].lower() or \
                        "across topologies" in table["title"].lower():
                    name = _tpc_modify_displayed_test_name(name)
                tbl_dict[tst_name_mod] = {"name": name,
            # First replacement sample discards previously collected
            # compare data for this test.
            create_new_list = False
            tbl_dict[tst_name_mod]["cmp-data"] = list()
            _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
                             include_tests=table["include-tests"])

    # Historical release columns, restricted to each item's NIC tag:
    for job, builds in item["data"].items():
        for tst_name, tst_data in data[job][str(build)].iteritems():
            if item["nic"] not in tst_data["tags"]:
            tst_name_mod = _tpc_modify_test_name(tst_name)
            if "across topologies" in table["title"].lower():
                tst_name_mod = tst_name_mod.replace("2n1l-", "")
            if tbl_dict.get(tst_name_mod, None) is None:
            if tbl_dict[tst_name_mod].get("history", None) is None:
                tbl_dict[tst_name_mod]["history"] = OrderedDict()
            if tbl_dict[tst_name_mod]["history"].get(item["title"],
                tbl_dict[tst_name_mod]["history"][item["title"]] = \
            # TODO: Re-work when NDRPDRDISC tests are not used
            if table["include-tests"] == "MRR":
                tbl_dict[tst_name_mod]["history"][item["title"
                    ]].append(tst_data["result"]["receive-rate"].
            elif table["include-tests"] == "PDR":
                if tst_data["type"] == "PDR":
                    tbl_dict[tst_name_mod]["history"][
                        append(tst_data["throughput"]["value"])
                elif tst_data["type"] == "NDRPDR":
                    tbl_dict[tst_name_mod]["history"][item[
                        "title"]].append(tst_data["throughput"][
            elif table["include-tests"] == "NDR":
                if tst_data["type"] == "NDR":
                    tbl_dict[tst_name_mod]["history"][
                        append(tst_data["throughput"]["value"])
                elif tst_data["type"] == "NDRPDR":
                    tbl_dict[tst_name_mod]["history"][item[
                        "title"]].append(tst_data["throughput"][
            except (TypeError, KeyError):

    # Build the output rows; rates are converted to Mpps (/ 1e6).
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if tbl_dict[tst_name].get("history", None) is not None:
            for hist_data in tbl_dict[tst_name]["history"].values():
                item.append(round(mean(hist_data) / 1000000, 2))
                item.append(round(stdev(hist_data) / 1000000, 2))
                item.extend(["Not tested", "Not tested"])
            item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["ref-data"]
        item.append(round(mean(data_t) / 1000000, 2))
        item.append(round(stdev(data_t) / 1000000, 2))
        item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["cmp-data"]
        item.append(round(mean(data_t) / 1000000, 2))
        item.append(round(stdev(data_t) / 1000000, 2))
        item.extend(["Not tested", "Not tested"])
        # Annotate special rows (no compare data / new tests / footnote).
        if item[-2] == "Not tested":
        elif item[-4] == "Not tested":
            item.append("New in CSIT-1908")
        elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
            item.append("See footnote [1]")
        item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if (len(item) == len(header)) and (item[-3] != "Not tested"):

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        file_handler.write(",".join([str(item) for item in test]) + "\n")

    txt_file_name = "{0}.txt".format(table["output-file"])
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # Append the explanatory footnote to the pretty-printed table.
    with open(txt_file_name, 'a') as txt_file:
        txt_file.writelines([
            "[1] CSIT-1908 changed test methodology of dot1q tests in "
            "2-node testbeds, dot1q encapsulation is now used on both "
            " Previously dot1q was used only on a single link with the "
            "other link carrying untagged Ethernet frames. This changes "
            " in slightly lower throughput in CSIT-1908 for these "
            "tests. See release notes."
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Compares results of the same tests run on two different NICs
    (reference vs compare NIC tag) and writes CSV + pretty TXT output.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test case", ]
    if table["include-tests"] == "MRR":
        hdr_param = "Rec Rate"
    ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
     "{0} Stdev [Mpps]".format(table["reference"]["title"]),
     "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
     "{0} Stdev [Mpps]".format(table["compare"]["title"]),
    header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".

    # Prepare data to the table:
    for job, builds in table["data"].items():
        for tst_name, tst_data in data[job][str(build)].iteritems():
            # Normalize the test name (same rules as _tpc_modify_test_name).
            tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                replace("-ndrpdr", "").replace("-pdrdisc", "").\
                replace("-ndrdisc", "").replace("-pdr", "").\
                replace("-ndr", "").\
                replace("1t1c", "1c").replace("2t1c", "1c").\
                replace("2t2c", "2c").replace("4t2c", "2c").\
                replace("4t4c", "4c").replace("8t4c", "4c")
            tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
            if tbl_dict.get(tst_name_mod, None) is None:
                name = "-".join(tst_data["name"].split("-")[:-1])
                tbl_dict[tst_name_mod] = {"name": name,
            if table["include-tests"] == "MRR":
                result = tst_data["result"]["receive-rate"].avg
            elif table["include-tests"] == "PDR":
                result = tst_data["throughput"]["PDR"]["LOWER"]
            elif table["include-tests"] == "NDR":
                result = tst_data["throughput"]["NDR"]["LOWER"]
            # Route the result into the ref/cmp bucket by NIC tag.
            if table["reference"]["nic"] in tst_data["tags"]:
                tbl_dict[tst_name_mod]["ref-data"].append(result)
            elif table["compare"]["nic"] in tst_data["tags"]:
                tbl_dict[tst_name_mod]["cmp-data"].append(result)
            except (TypeError, KeyError) as err:
                logging.debug("No data for {0}".format(tst_name))
                logging.debug(repr(err))
                # No data in output.xml for this test

    # Build the output rows; rates are converted to Mpps (/ 1e6).
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        data_t = tbl_dict[tst_name]["ref-data"]
        item.append(round(mean(data_t) / 1000000, 2))
        item.append(round(stdev(data_t) / 1000000, 2))
        item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        item.append(round(mean(data_t) / 1000000, 2))
        item.append(round(stdev(data_t) / 1000000, 2))
        item.extend([None, None])
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Pairs SOAK test results ("compare") with the corresponding NDR/PDR/MRR
    results ("reference") and reports the relative change with its stdev.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    "{0} Thput [Mpps]".format(table["reference"]["title"]),
    "{0} Stdev [Mpps]".format(table["reference"]["title"]),
    "{0} Thput [Mpps]".format(table["compare"]["title"]),
    "{0} Stdev [Mpps]".format(table["compare"]["title"]),
    "Delta [%]", "Stdev of delta [%]"]
    header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".

    # Create a list of available SOAK test results:
    for job, builds in table["compare"]["data"].items():
        for tst_name, tst_data in data[job][str(build)].iteritems():
            if tst_data["type"] == "SOAK":
                # Key by the name without the "-soak" suffix so it can be
                # matched against the reference NDR tests below.
                tst_name_mod = tst_name.replace("-soak", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                    tbl_dict[tst_name_mod] = {
                tbl_dict[tst_name_mod]["cmp-data"].append(
                    tst_data["throughput"]["LOWER"])
            except (KeyError, TypeError):
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table["reference"]["data"].items():
        for tst_name, tst_data in data[job][str(build)].iteritems():
            tst_name_mod = tst_name.replace("-ndrpdr", "").\
            if tst_name_mod in tests_lst:
                if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
                    if table["include-tests"] == "MRR":
                        result = tst_data["result"]["receive-rate"].avg
                    elif table["include-tests"] == "PDR":
                        result = tst_data["throughput"]["PDR"]["LOWER"]
                    elif table["include-tests"] == "NDR":
                        result = tst_data["throughput"]["NDR"]["LOWER"]
                    if result is not None:
                        tbl_dict[tst_name_mod]["ref-data"].append(
                except (KeyError, TypeError):

    # Build the output rows; rates are converted to Mpps (/ 1e6).
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        data_r = tbl_dict[tst_name]["ref-data"]
        data_r_mean = mean(data_r)
        item.append(round(data_r_mean / 1000000, 2))
        data_r_stdev = stdev(data_r)
        item.append(round(data_r_stdev / 1000000, 2))
        item.extend([None, None])
        data_c = tbl_dict[tst_name]["cmp-data"]
        data_c_mean = mean(data_c)
        item.append(round(data_c_mean / 1000000, 2))
        data_c_stdev = stdev(data_c)
        item.append(round(data_c_stdev / 1000000, 2))
        item.extend([None, None])
        if data_r_mean and data_c_mean:
            # Delta is propagated with its own stdev, not just means.
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    Classifies MRR trend data per test (regressions/progressions over
    short and long windows) and writes a sorted dashboard CSV + TXT.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    for job, builds in table["data"].items():
        for tst_name, tst_data in data[job][str(build)].iteritems():
            if tst_name.lower() in table.get("ignore-list", list()):
            if tbl_dict.get(tst_name, None) is None:
                groups = re.search(REGEX_NIC, tst_data["parent"])
                nic = groups.group(0)
                tbl_dict[tst_name] = {
                    "name": "{0}-{1}".format(nic, tst_data["name"]),
                    "data": OrderedDict()}
            tbl_dict[tst_name]["data"][str(build)] = \
                tst_data["result"]["receive-rate"]
            except (TypeError, KeyError):
                pass  # No data in output.xml for this test

    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        # Classify each sample and get the per-sample trend averages.
        classification_lst, avgs = classify_anomalies(data_t)

        # Short and long observation windows, capped at available samples.
        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])
        [x for x in avgs[-long_win_size:-win_size]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change vs. the value one window ago (percent).
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        rel_change_last = round(
            ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        # Long-term change vs. the long-window maximum (percent).
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        rel_change_long = round(
            ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
            if (isnan(last_avg) or
                    isnan(rel_change_last) or
                    isnan(rel_change_long)):
            [tbl_dict[tst_name]["name"],
             round(last_avg / 1000000, 2),
             classification_lst[-win_size:].count("regression"),
             classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Bucket rows: most regressions first, then most progressions,
    # then by short-term change within each bucket.
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info(" Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info(" Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1066 def _generate_url(base, testbed, test_name):
1067 """Generate URL to a trending plot from the name of the test case.
1069 :param base: The base part of URL common to all test cases.
1070 :param testbed: The testbed used for testing.
1071 :param test_name: The name of the test case.
1074 :type test_name: str
1075 :returns: The URL to the plot with the trending data for the given test
1085 if "lbdpdk" in test_name or "lbvpp" in test_name:
1086 file_name = "link_bonding"
1088 elif "114b" in test_name and "vhost" in test_name:
1091 elif "testpmd" in test_name or "l3fwd" in test_name:
1094 elif "memif" in test_name:
1095 file_name = "container_memif"
1098 elif "srv6" in test_name:
1101 elif "vhost" in test_name:
1102 if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
1103 file_name = "vm_vhost_l2"
1104 if "114b" in test_name:
1106 elif "l2xcbase" in test_name and "x520" in test_name:
1107 feature = "-base-l2xc"
1108 elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
1109 feature = "-base-l2bd"
1112 elif "ip4base" in test_name:
1113 file_name = "vm_vhost_ip4"
1116 elif "ipsecbasetnlsw" in test_name:
1117 file_name = "ipsecsw"
1118 feature = "-base-scale"
1120 elif "ipsec" in test_name:
1122 feature = "-base-scale"
1123 if "hw-" in test_name:
1124 file_name = "ipsechw"
1125 elif "sw-" in test_name:
1126 file_name = "ipsecsw"
1128 elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
1129 file_name = "ip4_tunnels"
1132 elif "ip4base" in test_name or "ip4scale" in test_name:
1134 if "xl710" in test_name:
1135 feature = "-base-scale-features"
1136 elif "iacl" in test_name:
1137 feature = "-features-iacl"
1138 elif "oacl" in test_name:
1139 feature = "-features-oacl"
1140 elif "snat" in test_name or "cop" in test_name:
1141 feature = "-features"
1143 feature = "-base-scale"
1145 elif "ip6base" in test_name or "ip6scale" in test_name:
1147 feature = "-base-scale"
1149 elif "l2xcbase" in test_name or "l2xcscale" in test_name \
1150 or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
1151 or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
1153 if "macip" in test_name:
1154 feature = "-features-macip"
1155 elif "iacl" in test_name:
1156 feature = "-features-iacl"
1157 elif "oacl" in test_name:
1158 feature = "-features-oacl"
1160 feature = "-base-scale"
1162 if "x520" in test_name:
1164 elif "x710" in test_name:
1166 elif "xl710" in test_name:
1168 elif "xxv710" in test_name:
1170 elif "vic1227" in test_name:
1172 elif "vic1385" in test_name:
1178 if "64b" in test_name:
1180 elif "78b" in test_name:
1182 elif "imix" in test_name:
1184 elif "9000b" in test_name:
1186 elif "1518b" in test_name:
1188 elif "114b" in test_name:
1192 anchor += framesize + '-'
1194 if "1t1c" in test_name:
1196 elif "2t2c" in test_name:
1198 elif "4t4c" in test_name:
1200 elif "2t1c" in test_name:
1202 elif "4t2c" in test_name:
1204 elif "8t4c" in test_name:
1207 return url + file_name + '-' + testbed + '-' + nic + framesize + \
1208 feature.replace("-int", "").replace("-tnl", "") + anchor + feature
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the dashboard CSV generated earlier, colors each row according to
    its regression/progression counters and writes the result as an HTML
    table embedded in an rST ``.. raw:: html`` directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows: alternate between the two shades of each color.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 hold the regression / progression counters of the
        # dashboard CSV -- TODO confirm against the CSV producer.
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        else:
            color = "normal"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name: the first column is rendered as a link to the trending
            # plot of the test.
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item

    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info(" Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    Writes a plain text file listing, per build: the build number, the
    version, and one line for every FAILed test (prefixed with the NIC
    extracted from the test's parent suite name).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)
    if data is None or data.empty:
        # logging.warn is a deprecated alias of logging.warning.
        logging.warning(" No data for the {0} '{1}'.".
                        format(table.get("type", ""), table.get("title", "")))
        return

    tbl_list = list()
    for job, builds in table["data"].items():
        for build in builds:
            # Build numbers may come from the spec as ints; metadata and the
            # filtered data are keyed by string.
            build = str(build)
            try:
                version = input_data.metadata(job, build).get("version", "")
            except KeyError:
                logging.error("Data for {job}: {build} is not present.".
                              format(job=job, build=build))
                return
            tbl_list.append(build)
            tbl_list.append(version)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_data["status"] != "FAIL":
                    continue
                groups = re.search(REGEX_NIC, tst_data["parent"])
                if not groups:
                    continue
                nic = groups.group(0)
                tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info(" Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        for test in tbl_list:
            file_handler.write(test + '\n')
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    For every test seen within the configured time window, counts failures
    and records when/where the last failure occurred, then writes the table
    (sorted by the number of failures, descending) as CSV and as pretty
    text.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the last "window" days are considered.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get("window", 7)))

    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    generated = input_data.metadata(job, build).\
                        get("generated", "")
                    if not generated:
                        continue
                    then = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        # (status, generated time, vpp version, csit build)
                        tbl_dict[tst_name]["data"][build] = (
                            tst_data["status"],
                            generated,
                            input_data.metadata(job, build).get("version", ""),
                            build)
                except (TypeError, KeyError) as err:
                    logging.warning("tst_name: {} - err: {}".
                                    format(tst_name, repr(err)))

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        for val in tst_data["data"].values():
            if val[0] == "FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        # Guard: fails_last_* are only bound when at least one failure was
        # seen, so tests with no failures are skipped.
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    # Sort by last-failure time, then group by number of failures
    # (descending) into the final order.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info(" Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info(" Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the failed-tests CSV generated by table_failed_tests and writes it
    as an HTML table (with alternating row colors and trending-plot links)
    embedded in an rST ``.. raw:: html`` directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows: alternate the two background shades.
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name: the first column is rendered as a link to the trending
            # plot of the test.
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item

    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info(" Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return