1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30 convert_csv_to_pretty_txt, relative_change_stdev
# Matches a NIC designation such as "10ge2p1x520" embedded in a test's
# parent suite name; used to extract (or strip) the NIC part of test names.
REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        try:
            # SECURITY NOTE: eval() executes an arbitrary expression coming
            # from the specification file.  The specification is assumed to
            # be trusted input; a dispatch dict of the known table_*
            # functions would be a safer replacement.
            eval(table["algorithm"])(table, data)
        except NameError as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
    logging.info("Done.")
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification.
    # NOTE: list(...) so the subscript also works on Python 3 where keys()
    # is a view, not a list.
    job = list(table["data"].keys())[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("  No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.items():
        # One output table per suite.
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("conf-history",
                                                            "show-run"):
                            # str.replace with a count instead of the
                            # deprecated string.replace (removed in Py3);
                            # only the first " |br| " is dropped.
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("    Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.  Data from all builds is merged
    before the per-suite tables are written.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.items():
        # One output table per suite.
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        # Reconstructed from elided source: placeholder text
                        # used in the merged tables -- TODO confirm wording.
                        col_data = col_data.replace("No Data",
                                                    "Not Captured     ")
                        if column["data"].split(" ")[1] in ("conf-history",
                                                            "show-run"):
                            # str.replace with a count instead of the
                            # deprecated string.replace (removed in Py3).
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append('"Not captured"')
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("    Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    def _mod_tst_name(tst_name):
        # Strip test-type suffixes and normalize the thread/core part so
        # reference, compare and history runs of the same test match.
        name = tst_name.replace("-ndrpdrdisc", "").\
            replace("-ndrpdr", "").replace("-pdrdisc", "").\
            replace("-ndrdisc", "").replace("-pdr", "").\
            replace("-ndr", "").\
            replace("1t1c", "1c").replace("2t1c", "1c").\
            replace("2t2c", "2c").replace("4t2c", "2c").\
            replace("4t4c", "4c").replace("8t4c", "4c")
        if "across topologies" in table["title"].lower():
            name = name.replace("2n1l-", "")
        return name

    def _display_name(tst_data):
        # Human-readable test name: "<nic>-<name without the last part>".
        groups = re.search(REGEX_NIC, tst_data["parent"])
        nic = groups.group(0) if groups else ""
        name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                              split("-")[:-1]))
        if "across testbeds" in table["title"].lower() or \
                "across topologies" in table["title"].lower():
            name = name.\
                replace("1t1c", "1c").replace("2t1c", "1c").\
                replace("2t2c", "2c").replace("4t2c", "2c").\
                replace("4t4c", "4c").replace("8t4c", "4c")
        return name

    def _append_result(lst, tst_data):
        # Append the result selected by table["include-tests"]; raises
        # KeyError/TypeError when the data is missing (handled by caller).
        # TODO: Re-work when NDRPDRDISC tests are not used.
        if table["include-tests"] == "MRR":
            lst.append(tst_data["result"]["receive-rate"].avg)
        elif table["include-tests"] == "PDR":
            if tst_data["type"] == "PDR":
                lst.append(tst_data["throughput"]["value"])
            elif tst_data["type"] == "NDRPDR":
                lst.append(tst_data["throughput"]["PDR"]["LOWER"])
        elif table["include-tests"] == "NDR":
            if tst_data["type"] == "NDR":
                lst.append(tst_data["throughput"]["value"])
            elif tst_data["type"] == "NDRPDR":
                lst.append(tst_data["throughput"]["NDR"]["LOWER"])

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _mod_tst_name(tst_name)
                if tbl_dict.get(tst_name_mod, None) is None:
                    tbl_dict[tst_name_mod] = {"name": _display_name(tst_data),
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    _append_result(tbl_dict[tst_name_mod]["ref-data"],
                                   tst_data)
                except (KeyError, TypeError):
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _mod_tst_name(tst_name)
                if tbl_dict.get(tst_name_mod, None) is None:
                    tbl_dict[tst_name_mod] = {"name": _display_name(tst_data),
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    _append_result(tbl_dict[tst_name_mod]["cmp-data"],
                                   tst_data)
                except (KeyError, TypeError):
                    pass  # No data in output.xml for this test

    # Add the historical data, keyed by the history item's title.
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].items():
                        tst_name_mod = _mod_tst_name(tst_name)
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(
                                item["title"], None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] \
                                = list()
                        try:
                            _append_result(
                                tbl_dict[tst_name_mod]["history"][
                                    item["title"]], tst_data)
                        except (TypeError, KeyError):
                            pass  # No data in output.xml for this test

    # Generate the rows; mean/stdev are in Mpps, delta in percent.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # item[-4] is the reference mean, item[-2] the compare mean.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]),
                                            float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
def table_performance_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.  Only tests run on the NIC named
    in the reference / compare / history specification are included; the
    NIC part is stripped from the matching keys and displayed names.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    def _mod_tst_name(tst_name):
        # Strip test-type suffixes, normalize the thread/core part and
        # drop the NIC designation so the same test matches across NICs.
        name = tst_name.replace("-ndrpdrdisc", "").\
            replace("-ndrpdr", "").replace("-pdrdisc", "").\
            replace("-ndrdisc", "").replace("-pdr", "").\
            replace("-ndr", "").\
            replace("1t1c", "1c").replace("2t1c", "1c").\
            replace("2t2c", "2c").replace("4t2c", "2c").\
            replace("4t4c", "4c").replace("8t4c", "4c")
        name = re.sub(REGEX_NIC, "", name)
        if "across topologies" in table["title"].lower():
            name = name.replace("2n1l-", "")
        return name

    def _display_name(tst_data):
        # Human-readable test name (no NIC prefix in this variant).
        name = "{0}".format("-".join(tst_data["name"].split("-")[:-1]))
        if "across testbeds" in table["title"].lower() or \
                "across topologies" in table["title"].lower():
            name = name.\
                replace("1t1c", "1c").replace("2t1c", "1c").\
                replace("2t2c", "2c").replace("4t2c", "2c").\
                replace("4t4c", "4c").replace("8t4c", "4c")
        return name

    def _append_result(lst, tst_data):
        # Append the result selected by table["include-tests"]; raises
        # KeyError/TypeError when the data is missing (handled by caller).
        # TODO: Re-work when NDRPDRDISC tests are not used.
        if table["include-tests"] == "MRR":
            lst.append(tst_data["result"]["receive-rate"].avg)
        elif table["include-tests"] == "PDR":
            if tst_data["type"] == "PDR":
                lst.append(tst_data["throughput"]["value"])
            elif tst_data["type"] == "NDRPDR":
                lst.append(tst_data["throughput"]["PDR"]["LOWER"])
        elif table["include-tests"] == "NDR":
            if tst_data["type"] == "NDR":
                lst.append(tst_data["throughput"]["value"])
            elif tst_data["type"] == "NDRPDR":
                lst.append(tst_data["throughput"]["NDR"]["LOWER"])

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table["reference"]["nic"] not in tst_data["tags"]:
                    continue
                tst_name_mod = _mod_tst_name(tst_name)
                if tbl_dict.get(tst_name_mod, None) is None:
                    tbl_dict[tst_name_mod] = {"name": _display_name(tst_data),
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    _append_result(tbl_dict[tst_name_mod]["ref-data"],
                                   tst_data)
                except (KeyError, TypeError):
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table["compare"]["nic"] not in tst_data["tags"]:
                    continue
                tst_name_mod = _mod_tst_name(tst_name)
                if tbl_dict.get(tst_name_mod, None) is None:
                    tbl_dict[tst_name_mod] = {"name": _display_name(tst_data),
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    _append_result(tbl_dict[tst_name_mod]["cmp-data"],
                                   tst_data)
                except (KeyError, TypeError):
                    pass  # No data in output.xml for this test

    # Add the historical data, keyed by the history item's title.
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].items():
                        if item["nic"] not in tst_data["tags"]:
                            continue
                        tst_name_mod = _mod_tst_name(tst_name)
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(
                                item["title"], None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] \
                                = list()
                        try:
                            _append_result(
                                tbl_dict[tst_name_mod]["history"][
                                    item["title"]], tst_data)
                        except (TypeError, KeyError):
                            pass  # No data in output.xml for this test

    # Generate the rows; mean/stdev are in Mpps, delta in percent.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # item[-4] is the reference mean, item[-2] the compare mean.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]),
                                            float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.  The same test run on two NICs
    (reference vs compare) is matched by the NIC-stripped test name.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                # Strip test-type suffixes, normalize thread/core naming
                # and drop the NIC part so both NICs map to the same key.
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = "-".join(tst_data["name"].split("-")[:-1])
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    result = None
                    if table["include-tests"] == "MRR":
                        result = tst_data["result"]["receive-rate"].avg
                    elif table["include-tests"] == "PDR":
                        result = tst_data["throughput"]["PDR"]["LOWER"]
                    elif table["include-tests"] == "NDR":
                        result = tst_data["throughput"]["NDR"]["LOWER"]
                    else:
                        continue

                    if result:
                        # The NIC tag decides which side of the comparison
                        # this sample belongs to.
                        if table["reference"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["ref-data"].append(result)
                        elif table["compare"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug("No data for {0}".format(tst_name))
                    logging.debug(repr(err))
                    # No data in output.xml for this test

    # Generate the rows; mean/stdev are in Mpps, delta in percent.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # item[-4] is the reference mean, item[-2] the compare mean.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]),
                                            float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.  SOAK results (compare) are
    matched against NDR/PDR/MRR results (reference) of the same test.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            "Test case",
            "{0} Throughput [Mpps]".format(table["reference"]["title"]),
            "{0} Stdev [Mpps]".format(table["reference"]["title"]),
            "{0} Throughput [Mpps]".format(table["compare"]["title"]),
            "{0} Stdev [Mpps]".format(table["compare"]["title"]),
            "Delta [%]", "Stdev of delta [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data["type"] == "SOAK":
                    tst_name_mod = tst_name.replace("-soak", "")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data["parent"])
                        nic = groups.group(0) if groups else ""
                        name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                              split("-")[:-1]))
                        tbl_dict[tst_name_mod] = {
                            "name": name,
                            "ref-data": list(),
                            "cmp-data": list()}
                    try:
                        tbl_dict[tst_name_mod]["cmp-data"].append(
                            tst_data["throughput"]["LOWER"])
                    except (KeyError, TypeError):
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                # Suffix stripping reconstructed from elided source --
                # TODO confirm the exact suffixes removed here.
                tst_name_mod = tst_name.replace("-ndrpdr", "").\
                    replace("-mrr", "")
                if tst_name_mod in tests_lst:
                    try:
                        if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
                            if table["include-tests"] == "MRR":
                                result = tst_data["result"][
                                    "receive-rate"].avg
                            elif table["include-tests"] == "PDR":
                                result = tst_data["throughput"][
                                    "PDR"]["LOWER"]
                            elif table["include-tests"] == "NDR":
                                result = tst_data["throughput"][
                                    "NDR"]["LOWER"]
                            else:
                                result = None
                            if result is not None:
                                tbl_dict[tst_name_mod]["ref-data"].append(
                                    result)
                    except (KeyError, TypeError):
                        continue

    # Generate the rows; mean/stdev are in Mpps, delta in percent.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        data_r = tbl_dict[tst_name]["ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            # Initialize to None so the delta test below cannot hit an
            # unbound local when the reference data set is empty.
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name]["cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get("ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            if (isnan(last_avg) or
                    isnan(rel_change_last) or
                    isnan(rel_change_long)):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order the rows: most regressions first, then most progressions,
    # then by short-term change.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
1033 def _generate_url(base, testbed, test_name):
1034 """Generate URL to a trending plot from the name of the test case.
1036 :param base: The base part of URL common to all test cases.
1037 :param testbed: The testbed used for testing.
1038 :param test_name: The name of the test case.
1041 :type test_name: str
1042 :returns: The URL to the plot with the trending data for the given test
# NOTE(review): several lines of this function are elided in this view
# (initialisations of file_name/feature/nic/framesize/anchor/url and many
# assignment branches are missing) -- verify against the full file.
# Step 1: map substrings of the test name to the trending page file name
# and, for some families, a "feature" suffix used in the page anchor.
1052 if "lbdpdk" in test_name or "lbvpp" in test_name:
1053 file_name = "link_bonding"
1055 elif "114b" in test_name and "vhost" in test_name:
1058 elif "testpmd" in test_name or "l3fwd" in test_name:
1061 elif "memif" in test_name:
1062 file_name = "container_memif"
1065 elif "srv6" in test_name:
1068 elif "vhost" in test_name:
# vhost tests: page and feature depend on the L2 mode and the NIC model.
1069 if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
1070 file_name = "vm_vhost_l2"
1071 if "114b" in test_name:
1073 elif "l2xcbase" in test_name and "x520" in test_name:
1074 feature = "-base-l2xc"
1075 elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
1076 feature = "-base-l2bd"
1079 elif "ip4base" in test_name:
1080 file_name = "vm_vhost_ip4"
1083 elif "ipsecbasetnlsw" in test_name:
1084 file_name = "ipsecsw"
1085 feature = "-base-scale"
1087 elif "ipsec" in test_name:
# generic ipsec: feature fixed, page chosen by hw-/sw- crypto marker.
1089 feature = "-base-scale"
1090 if "hw-" in test_name:
1091 file_name = "ipsechw"
1092 elif "sw-" in test_name:
1093 file_name = "ipsecsw"
1095 elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
1096 file_name = "ip4_tunnels"
1099 elif "ip4base" in test_name or "ip4scale" in test_name:
# ip4 routed-forwarding: feature suffix depends on NIC / enabled features.
1101 if "xl710" in test_name:
1102 feature = "-base-scale-features"
1103 elif "iacl" in test_name:
1104 feature = "-features-iacl"
1105 elif "oacl" in test_name:
1106 feature = "-features-oacl"
1107 elif "snat" in test_name or "cop" in test_name:
1108 feature = "-features"
1110 feature = "-base-scale"
1112 elif "ip6base" in test_name or "ip6scale" in test_name:
1114 feature = "-base-scale"
1116 elif "l2xcbase" in test_name or "l2xcscale" in test_name \
1117 or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
1118 or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
1120 if "macip" in test_name:
1121 feature = "-features-macip"
1122 elif "iacl" in test_name:
1123 feature = "-features-iacl"
1124 elif "oacl" in test_name:
1125 feature = "-features-oacl"
1127 feature = "-base-scale"
# Step 2: NIC model -> 'nic' URL component (assignments elided in this view).
1129 if "x520" in test_name:
1131 elif "x710" in test_name:
1133 elif "xl710" in test_name:
1135 elif "xxv710" in test_name:
1137 elif "vic1227" in test_name:
1139 elif "vic1385" in test_name:
# Step 3: frame size -> 'framesize' URL component (assignments elided).
1145 if "64b" in test_name:
1147 elif "78b" in test_name:
1149 elif "imix" in test_name:
1151 elif "9000b" in test_name:
1153 elif "1518b" in test_name:
1155 elif "114b" in test_name:
1159 anchor += framesize + '-'
# Step 4: thread/core combination -> anchor fragment (assignments elided).
1161 if "1t1c" in test_name:
1163 elif "2t2c" in test_name:
1165 elif "4t4c" in test_name:
1167 elif "2t1c" in test_name:
1169 elif "4t2c" in test_name:
1171 elif "8t4c" in test_name:
# Final URL: base + page + testbed + nic + framesize + in-page anchor.
# "-int"/"-tnl" are stripped from the feature in the page name but kept
# in the anchor suffix.
1174 return url + file_name + '-' + testbed + '-' + nic + framesize + \
1175 feature.replace("-int", "").replace("-tnl", "") + anchor + feature
1178 def table_performance_trending_dashboard_html(table, input_data):
1179 """Generate the table(s) with algorithm:
1180 table_performance_trending_dashboard_html specified in the specification
1183 :param table: Table to generate.
1184 :param input_data: Data to process.
1186 :type input_data: InputData
# Abort if the specification does not say which testbed the data is from;
# the testbed name is needed to build the trending-plot URLs below.
1189 testbed = table.get("testbed", None)
1191 logging.error("The testbed is not defined for the table '{0}'.".
1192 format(table.get("title", "")))
1195 logging.info(" Generating the table {0} ...".
1196 format(table.get("title", "")))
# Read the whole CSV dashboard into a list of rows.
# NOTE(review): 'rb' + csv.reader is a Python 2 idiom; under Python 3 the
# file must be opened in text mode with newline='' -- confirm interpreter.
1199 with open(table["input-file"], 'rb') as csv_file:
1200 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1201 csv_lst = [item for item in csv_content]
1203 logging.warning("The input file is not defined.")
1205 except csv.Error as err:
1206 logging.warning("Not possible to process the file '{0}'.\n{1}".
1207 format(table["input-file"], err))
# Build the HTML table as an ElementTree; it is later serialized into an
# RST ".. raw:: html" block.
1211 dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row: first CSV row, first column left-aligned, rest centered.
1214 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
1215 for idx, item in enumerate(csv_lst[0]):
1216 alignment = "left" if idx == 0 else "center"
1217 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Row colouring: two alternating shades per classification
# (regression = red, progression = green, normal = blue-grey).
1221 colors = {"regression": ("#ffcccc", "#ff9999"),
1222 "progression": ("#c6ecc6", "#9fdf9f"),
1223 "normal": ("#e9f1fb", "#d4e4f7")}
1224 for r_idx, row in enumerate(csv_lst[1:]):
# NOTE(review): conditions selecting the classification are elided here.
1226 color = "regression"
1228 color = "progression"
1231 background = colors[color][r_idx % 2]
1232 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
# Data cells; the test-name cell gets a hyperlink to its trending plot.
1235 for c_idx, item in enumerate(row):
1236 alignment = "left" if c_idx == 0 else "center"
1237 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
1240 url = _generate_url("../trending/", testbed, item)
1241 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Serialize the table wrapped in an RST raw-html directive.
1246 with open(table["output-file"], 'w') as html_file:
1247 logging.info(" Writing file: '{0}'".format(table["output-file"]))
1248 html_file.write(".. raw:: html\n\n\t")
1249 html_file.write(ET.tostring(dashboard))
1250 html_file.write("\n\t<p><br><br></p>\n")
1252 logging.warning("The output file is not defined.")
1256 def table_last_failed_tests(table, input_data):
1257 """Generate the table(s) with algorithm: table_last_failed_tests
1258 specified in the specification file.
1260 :param table: Table to generate.
1261 :param input_data: Data to process.
1262 :type table: pandas.Series
1263 :type input_data: InputData
1266 logging.info(" Generating the table {0} ...".
1267 format(table.get("title", "")))
1269 # Transform the data
1270 logging.info(" Creating the data set for the {0} '{1}'.".
1271 format(table.get("type", ""), table.get("title", "")))
1272 data = input_data.filter_data(table, continue_on_error=True)
1274 if data is None or data.empty:
1275 logging.warn(" No data for the {0} '{1}'.".
1276 format(table.get("type", ""), table.get("title", "")))
# For every job/build listed in the spec, record the build id, the tested
# version, and the names of all tests whose status is FAIL.
1280 for job, builds in table["data"].items():
1281 for build in builds:
1284 version = input_data.metadata(job, build).get("version", "")
1286 logging.error("Data for {job}: {build} is not present.".
1287 format(job=job, build=build))
1289 tbl_list.append(build)
1290 tbl_list.append(version)
# Only failed tests make it into the listing (non-FAIL are skipped).
1291 for tst_name, tst_data in data[job][build].iteritems():
1292 if tst_data["status"] != "FAIL":
# Prefix the test name with the NIC model parsed from the suite name.
1294 groups = re.search(REGEX_NIC, tst_data["parent"])
1297 nic = groups.group(0)
1298 tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))
# Plain-text output: one entry per line.
1300 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1301 logging.info(" Writing file: '{0}'".format(file_name))
1302 with open(file_name, "w") as file_handler:
1303 for test in tbl_list:
1304 file_handler.write(test + '\n')
1307 def table_failed_tests(table, input_data):
1308 """Generate the table(s) with algorithm: table_failed_tests
1309 specified in the specification file.
1311 :param table: Table to generate.
1312 :param input_data: Data to process.
1313 :type table: pandas.Series
1314 :type input_data: InputData
1317 logging.info(" Generating the table {0} ...".
1318 format(table.get("title", "")))
1320 # Transform the data
1321 logging.info(" Creating the data set for the {0} '{1}'.".
1322 format(table.get("type", ""), table.get("title", "")))
1323 data = input_data.filter_data(table, continue_on_error=True)
1325 # Prepare the header of the tables
1326 header = ["Test Case",
1328 "Last Failure [Time]",
1329 "Last Failure [VPP-Build-Id]",
1330 "Last Failure [CSIT-Job-Build-Id]"]
1332 # Generate the data for the table according to the model in the table
# Only builds generated within the last "window" days (default 7) are
# counted towards the failure statistics.
1336 timeperiod = timedelta(int(table.get("window", 7)))
1339 for job, builds in table["data"].items():
1340 for build in builds:
1342 for tst_name, tst_data in data[job][build].iteritems():
1343 if tst_name.lower() in table.get("ignore-list", list()):
# First time this test is seen: derive its display name from the
# NIC model (parsed from the parent suite) plus the test name.
1345 if tbl_dict.get(tst_name, None) is None:
1346 groups = re.search(REGEX_NIC, tst_data["parent"])
1349 nic = groups.group(0)
1350 tbl_dict[tst_name] = {
1351 "name": "{0}-{1}".format(nic, tst_data["name"]),
1352 "data": OrderedDict()}
1354 generated = input_data.metadata(job, build).\
1355 get("generated", "")
# Keep the build only if its timestamp falls inside the window.
1358 then = dt.strptime(generated, "%Y%m%d %H:%M")
1359 if (now - then) <= timeperiod:
1360 tbl_dict[tst_name]["data"][build] = (
1363 input_data.metadata(job, build).get("version", ""),
# Malformed/missing metadata is logged and skipped, not fatal.
1365 except (TypeError, KeyError) as err:
1366 logging.warning("tst_name: {} - err: {}".
1367 format(tst_name, repr(err)))
# Per test: count FAILs in the window and remember the most recent
# failure's date, VPP build and CSIT build.
1371 for tst_data in tbl_dict.values():
1373 for val in tst_data["data"].values():
1374 if val[0] == "FAIL":
1376 fails_last_date = val[1]
1377 fails_last_vpp = val[2]
1378 fails_last_csit = val[3]
1380 max_fails = fails_nr if fails_nr > max_fails else max_fails
1381 tbl_lst.append([tst_data["name"],
1385 "mrr-daily-build-{0}".format(fails_last_csit)])
# Sort by last-failure field descending, then bucket rows by the number
# of failures, highest bucket first.
1387 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1389 for nrf in range(max_fails, -1, -1):
1390 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1391 tbl_sorted.extend(tbl_fails)
1392 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1394 logging.info(" Writing file: '{0}'".format(file_name))
1395 with open(file_name, "w") as file_handler:
1396 file_handler.write(",".join(header) + "\n")
1397 for test in tbl_sorted:
1398 file_handler.write(",".join([str(item) for item in test]) + '\n')
# Also emit a human-readable (pretty txt) version of the same CSV.
1400 txt_file_name = "{0}.txt".format(table["output-file"])
1401 logging.info(" Writing file: '{0}'".format(txt_file_name))
1402 convert_csv_to_pretty_txt(file_name, txt_file_name)
1405 def table_failed_tests_html(table, input_data):
1406 """Generate the table(s) with algorithm: table_failed_tests_html
1407 specified in the specification file.
1409 :param table: Table to generate.
1410 :param input_data: Data to process.
1411 :type table: pandas.Series
1412 :type input_data: InputData
# The testbed name is mandatory: it is part of every trending-plot URL.
1415 testbed = table.get("testbed", None)
1417 logging.error("The testbed is not defined for the table '{0}'.".
1418 format(table.get("title", "")))
1421 logging.info(" Generating the table {0} ...".
1422 format(table.get("title", "")))
# Load the CSV produced by table_failed_tests into a list of rows.
# NOTE(review): 'rb' + csv.reader is a Python 2 idiom; under Python 3 use
# text mode with newline='' -- confirm interpreter version.
1425 with open(table["input-file"], 'rb') as csv_file:
1426 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1427 csv_lst = [item for item in csv_content]
1429 logging.warning("The input file is not defined.")
1431 except csv.Error as err:
1432 logging.warning("Not possible to process the file '{0}'.\n{1}".
1433 format(table["input-file"], err))
# Build the HTML table; unlike the dashboard variant, rows only alternate
# between two neutral shades (no per-classification colouring).
1437 failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row from the first CSV row.
1440 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
1441 for idx, item in enumerate(csv_lst[0]):
1442 alignment = "left" if idx == 0 else "center"
1443 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
1447 colors = ("#e9f1fb", "#d4e4f7")
1448 for r_idx, row in enumerate(csv_lst[1:]):
1449 background = colors[r_idx % 2]
1450 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
# Data cells; the test-name cell links to its trending plot.
1453 for c_idx, item in enumerate(row):
1454 alignment = "left" if c_idx == 0 else "center"
1455 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
1458 url = _generate_url("../trending/", testbed, item)
1459 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Serialize as an RST raw-html block.
1464 with open(table["output-file"], 'w') as html_file:
1465 logging.info(" Writing file: '{0}'".format(table["output-file"]))
1466 html_file.write(".. raw:: html\n\n\t")
1467 html_file.write(ET.tostring(failed_tests))
1468 html_file.write("\n\t<p><br><br></p>\n")
1470 logging.warning("The output file is not defined.")