1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
23 from string import replace
24 from collections import OrderedDict
25 from numpy import nan, isnan
26 from xml.etree import ElementTree as ET
28 from errors import PresentationError
29 from utils import mean, stdev, relative_change, remove_outliers, split_outliers
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        # Look the algorithm up in this module's namespace instead of
        # eval()-ing the specification string, so a malformed (or malicious)
        # specification cannot execute arbitrary expressions.
        algorithm = globals().get(table["algorithm"], None)
        if algorithm is None:
            logging.error("The algorithm '{0}' is not defined.".
                          format(table["algorithm"]))
            continue
        algorithm(table, data)
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # NOTE(review): this copy of the function has lines elided (e.g. the
    # initialisations of 'header', 'row_lst' and 'table_lst', and the
    # try/except matching the error branches below). Comments describe only
    # the visible code; confirm structure against the full source.

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Narrow the input data down to what this table specification asks for.
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    for column in table["columns"]:
        # CSV-quote each title: wrap in '"' and double any embedded '"'.
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # Only the first job and its first build are used as the data source.
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    suites = input_data.suites(job, build)
    # Presumably reached when no suites are available — TODO confirm the
    # elided guard around this branch.
    logging.error(" No data available. The table will not be generated.")

    # One output file is produced per suite.
    for suite_longname, suite in suites.iteritems():
        suite_name = suite["name"]
        for test in data[job][build].keys():
            # Select only tests belonging to the current suite.
            if data[job][build][test]["parent"] in suite_name:
                for column in table["columns"]:
                    # column["data"] is "<cmd> <field>"; take the field name
                    # after the first space and CSV-escape its value.
                    col_data = str(data[job][build][test][column["data"].
                                   split(" ")[1]]).replace('"', '""')
                    if column["data"].split(" ")[1] in ("vat-history",
                    # Strip the first " |br| " marker and wrap the value in
                    # |prein| / |preout| markers for preformatted rendering.
                    col_data = replace(col_data, " |br| ", "",
                    col_data = " |prein| {0} |preout| ".\
                        format(col_data[:-5])
                    row_lst.append('"{0}"'.format(col_data))
                    # Fallback cell when the field is missing for this test.
                    row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info(" Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in table_lst:
                file_handler.write(",".join(item) + "\n")

    logging.info(" Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # NOTE(review): lines are elided in this copy (list initialisations and
    # the try/except around the per-column extraction). Structure mirrors
    # table_details() but operates on data merged across jobs/builds.

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    # Merge per-job/per-build data into one flat structure keyed by test.
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    for column in table["columns"]:
        # CSV-quote each title: wrap in '"' and double any embedded '"'.
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # One output file is produced per suite.
    for _, suite in suites.iteritems():
        suite_name = suite["name"]
        for test in data.keys():
            # Select only tests belonging to the current suite.
            if data[test]["parent"] in suite_name:
                for column in table["columns"]:
                    # Field name is the token after the first space in
                    # column["data"]; CSV-escape its value.
                    col_data = str(data[test][column["data"].
                                   split(" ")[1]]).replace('"', '""')
                    if column["data"].split(" ")[1] in ("vat-history",
                    col_data = replace(col_data, " |br| ", "",
                    col_data = " |prein| {0} |preout| ".\
                        format(col_data[:-5])
                    row_lst.append('"{0}"'.format(col_data))
                    # Fallback cell when the field is missing for this test.
                    row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info(" Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in table_lst:
                file_handler.write(",".join(item) + "\n")

    logging.info(" Done.")
def table_performance_improvements(table, input_data):
    """Generate the table(s) with algorithm: table_performance_improvements
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # NOTE(review): lines are elided in this copy (loop headers, 'try:'
    # statements matching the orphan 'except' clauses below, and several
    # variable initialisations). Comments describe only the visible code.

    def _write_line_to_file(file_handler, data):
        """Write a line to the .csv file.

        :param file_handler: File handler for the csv file. It must be open for
            writing.
        :param data: Item to be written to the file.
        :type file_handler: BinaryIO
        :type data: list
        """

        if isinstance(item["data"], str):
            # Remove -?drdisc from the end
            if item["data"].endswith("drdisc"):
                item["data"] = item["data"][:-8]
            line_lst.append(item["data"])
        elif isinstance(item["data"], float):
            # Floats are formatted with one decimal place.
            line_lst.append("{:.1f}".format(item["data"]))
        elif item["data"] is None:
        file_handler.write(",".join(line_lst) + "\n")

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the template file; without it the table cannot be generated.
    file_name = table.get("template", None)
    tmpl = _read_csv_template(file_name)
    except PresentationError:
        logging.error(" The template '{0}' does not exist. Skipping the "
                      "table.".format(file_name))
    logging.error("The template is not defined. Skipping the table.")

    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    for column in table["columns"]:
        header.append(column["title"])

    # Generate the data for the table according to the model in the table
    for tmpl_item in tmpl:
        for column in table["columns"]:
            # column["data"] = "<command> <args...>".
            cmd = column["data"].split(" ")[0]
            args = column["data"].split(" ")[1:]
            if cmd == "template":
                # Copy the value from the template; numeric when possible
                # (the string fallback line below suggests an elided
                # try/except ValueError — TODO confirm).
                val = float(tmpl_item[int(args[0])])
                val = tmpl_item[int(args[0])]
                tbl_item.append({"data": val})
            # Elided 'data' command branch: collect throughput values ...
            for build in data[job]:
                data_lst.append(float(build[tmpl_item[0]]
                                      ["throughput"]["value"]))
            except (KeyError, TypeError):
            # ... then reduce them with the named operation, scaled to Mpps.
            # NOTE(review): eval() on a specification-provided name — flag
            # for replacement with an explicit whitelist of operations.
            tbl_item.append({"data": (eval(operation)(data_lst)) /
            tbl_item.append({"data": None})
            elif cmd == "operation":
                # Apply the named binary operation to two earlier columns.
                nr1 = float(tbl_item[int(args[1])]["data"])
                nr2 = float(tbl_item[int(args[2])]["data"])
                tbl_item.append({"data": eval(operation)(nr1, nr2)})
                tbl_item.append({"data": None})
                except (IndexError, ValueError, TypeError):
                    logging.error("No data for {0}".format(tbl_item[0]["data"]))
                    tbl_item.append({"data": None})
            logging.error("Not supported command {0}. Skipping the table.".
        tbl_lst.append(tbl_item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)

    # Create the tables and write them to the files
    # (elided list assignment; four output files: ndr/pdr x top/low)
        "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])

    for file_name in file_names:
        logging.info(" Writing the file '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            if isinstance(item[-1]["data"], float):
                rel_change = round(item[-1]["data"], 1)
                rel_change = item[-1]["data"]
            # Route each row: NDR/PDR matched against the file name; rows
            # with >= 10.0 % relative change go to *_top, the rest to *_low.
            if "ndr_top" in file_name \
                    and "ndr" in item[0]["data"] \
                    and rel_change >= 10.0:
                _write_line_to_file(file_handler, item)
            elif "pdr_top" in file_name \
                    and "pdr" in item[0]["data"] \
                    and rel_change >= 10.0:
                _write_line_to_file(file_handler, item)
            elif "ndr_low" in file_name \
                    and "ndr" in item[0]["data"] \
                    and rel_change < 10.0:
                _write_line_to_file(file_handler, item)
            elif "pdr_low" in file_name \
                    and "pdr" in item[0]["data"] \
                    and rel_change < 10.0:
                _write_line_to_file(file_handler, item)

    logging.info(" Done.")
333 def _read_csv_template(file_name):
334 """Read the template from a .csv file.
336 :param file_name: Name / full path / relative path of the file to read.
338 :returns: Data from the template as list (lines) of lists (items on line).
340 :raises: PresentationError if it is not possible to read the file.
344 with open(file_name, 'r') as csv_file:
346 for line in csv_file:
347 tmpl_data.append(line[:-1].split(","))
349 except IOError as err:
350 raise PresentationError(str(err), level="ERROR")
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # NOTE(review): many lines are elided in this copy ('try:' statements,
    # loop headers such as 'for build in builds:', list/dict initialisations
    # and several 'else' branches). Comments describe only the visible code.

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test case", ]
    # Optional extra "history" column pairs precede the reference/compare
    # column pairs.
    history = table.get("history", None)
    ["{0} Throughput [Mpps]".format(item["title"]),
     "{0} Stdev [Mpps]".format(item["title"])])
    ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
     "{0} Stdev [Mpps]".format(table["reference"]["title"]),
     "{0} Throughput [Mpps]".format(table["compare"]["title"]),
     "{0} Stdev [Mpps]".format(table["compare"]["title"]),
    header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".

    # Prepare data to the table:
    # Collect reference throughput samples per test.
    for job, builds in table["reference"]["data"].items():
        for tst_name, tst_data in data[job][str(build)].iteritems():
            if tbl_dict.get(tst_name, None) is None:
                # Display name: NIC prefix of the parent suite + test name.
                name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                        "-".join(tst_data["name"].
                tbl_dict[tst_name] = {"name": name,
            tbl_dict[tst_name]["ref-data"].\
                append(tst_data["throughput"]["value"])
            pass  # No data in output.xml for this test
    # Collect compare throughput samples per test.
    for job, builds in table["compare"]["data"].items():
        for tst_name, tst_data in data[job][str(build)].iteritems():
            tbl_dict[tst_name]["cmp-data"].\
                append(tst_data["throughput"]["value"])
            # Tests without compare data are dropped from the table.
            tbl_dict.pop(tst_name, None)
    # Collect the optional history samples per test, keyed by column title.
        for job, builds in item["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                if tbl_dict[tst_name].get("history", None) is None:
                    tbl_dict[tst_name]["history"] = OrderedDict()
                if tbl_dict[tst_name]["history"].get(item["title"],
                tbl_dict[tst_name]["history"][item["title"]] = \
                tbl_dict[tst_name]["history"][item["title"]].\
                    append(tst_data["throughput"]["value"])
                except (TypeError, KeyError):

    # Build one table row per test: name, then (mean, stdev) pairs in Mpps,
    # then the relative change.
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if tbl_dict[tst_name].get("history", None) is not None:
            for hist_data in tbl_dict[tst_name]["history"].values():
                data_t = remove_outliers(
                    hist_data, outlier_const=table["outlier-const"])
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
                item.extend([None, None])
            item.extend([None, None])
            item.extend([None, None])
        if tbl_dict[tst_name]["ref-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
        item.extend([None, None])
        if tbl_dict[tst_name]["cmp-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
        item.extend([None, None])
        # item[-4] is the reference mean, item[-2] the compare mean.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Only complete rows (one cell per header column) are kept.
        if len(item) == len(header):

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Six "full" csv files: {ndr,pdr} x {1t1c,2t2c,4t4c}.
    tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])

    for file_name in tbl_names:
        logging.info(" Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            if (file_name.split("-")[-3] in test[0] and  # NDR vs PDR
                    file_name.split("-")[-2] in test[0]):  # cores
                # Drop the trailing rate-type token from the test name.
                test[0] = "-".join(test[0].split("-")[:-1])
                file_handler.write(",".join([str(item) for item in test]) +

    # Render each csv file also as an ASCII table (.txt) via prettytable.
    tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
                     "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-pdr-4t4c-full.txt".format(table["output-file"])

    for i, txt_name in enumerate(tbl_names_txt):
        logging.info(" Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                # First row becomes the prettytable header.
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                txt_table.add_row(row)
            txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))

    # Selected tests in csv:
    # Top/bottom "nr-of-tests-shown" rows of the 1t1c NDR table.
    input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:

    output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info(" Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:

    output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info(" Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        # Iterate from the last line backwards for the bottom selection.
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:

    # Same top/bottom selection for the 1t1c PDR table.
    input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:

    output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info(" Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:

    output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info(" Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
def table_performance_comparison_mrr(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison_mrr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # NOTE(review): lines are elided in this copy ('try:' statements, inner
    # 'for build in builds:' loops and list initialisations). Same structure
    # as table_performance_comparison(), but reads MRR results from
    # tst_data["result"]["throughput"] and emits no NDR/PDR split.

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test case",
              "{0} Throughput [Mpps]".format(table["reference"]["title"]),
              "{0} stdev [Mpps]".format(table["reference"]["title"]),
              "{0} Throughput [Mpps]".format(table["compare"]["title"]),
              "{0} stdev [Mpps]".format(table["compare"]["title"]),
    header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".

    # Prepare data to the table:
    # Collect reference samples per test.
    for job, builds in table["reference"]["data"].items():
        for tst_name, tst_data in data[job][str(build)].iteritems():
            if tbl_dict.get(tst_name, None) is None:
                # Display name: NIC prefix of the parent suite + test name.
                name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                        "-".join(tst_data["name"].
                tbl_dict[tst_name] = {"name": name,
            tbl_dict[tst_name]["ref-data"].\
                append(tst_data["result"]["throughput"])
            pass  # No data in output.xml for this test
    # Collect compare samples per test.
    for job, builds in table["compare"]["data"].items():
        for tst_name, tst_data in data[job][str(build)].iteritems():
            tbl_dict[tst_name]["cmp-data"].\
                append(tst_data["result"]["throughput"])
            # Tests without compare data are dropped from the table.
            tbl_dict.pop(tst_name, None)

    # Build one row per test: name, ref (mean, stdev), cmp (mean, stdev),
    # relative change; values in Mpps.
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if tbl_dict[tst_name]["ref-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
        item.extend([None, None])
        if tbl_dict[tst_name]["cmp-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
        item.extend([None, None])
        # item[1] is the reference mean, item[3] the compare mean.
        if item[1] is not None and item[3] is not None and item[1] != 0:
            item.append(int(relative_change(float(item[1]), float(item[3]))))

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Three "full" csv files, one per core count.
    tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-2t2c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-4t4c-full{1}".format(table["output-file"],
                                           table["output-file-ext"])

    for file_name in tbl_names:
        logging.info(" Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            if file_name.split("-")[-2] in test[0]:  # cores
                # Drop the trailing token from the test name.
                test[0] = "-".join(test[0].split("-")[:-1])
                file_handler.write(",".join([str(item) for item in test]) +

    # Render each csv file also as an ASCII table (.txt) via prettytable.
    tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
                     "{0}-2t2c-full.txt".format(table["output-file"]),
                     "{0}-4t4c-full.txt".format(table["output-file"])

    for i, txt_name in enumerate(tbl_names_txt):
        logging.info(" Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                # First row becomes the prettytable header.
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                txt_table.add_row(row)
            txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # NOTE(review): lines are elided in this copy (some header entries,
    # 'try:' statements, 'continue'/'else' branches and initialisations of
    # tbl_dict / tbl_lst / tbl_sorted / max_median). Comments describe only
    # the visible code.

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    # Collect per-build throughput samples per test, keyed by build number.
    for job, builds in table["data"].items():
        for tst_name, tst_data in data[job][str(build)].iteritems():
            # Tests on the ignore-list are excluded from the dashboard.
            if tst_name.lower() in table["ignore-list"]:
            if tbl_dict.get(tst_name, None) is None:
                # Display name: NIC prefix of the parent suite + test name.
                name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                        "-".join(tst_data["name"].
                tbl_dict[tst_name] = {"name": name,
                                      "data": OrderedDict()}
            tbl_dict[tst_name]["data"][str(build)] = \
                tst_data["result"]["throughput"]
            except (TypeError, KeyError):
                pass  # No data in output.xml for this test

    for tst_name in tbl_dict.keys():
        # At least three samples are needed for the trend statistics.
        if len(tbl_dict[tst_name]["data"]) > 2:

            pd_data = pd.Series(tbl_dict[tst_name]["data"])
            # Drop outliers before computing rolling statistics.
            data_t, _ = split_outliers(pd_data, outlier_const=1.5,
                                       window=table["window"])
            last_key = data_t.keys()[-1]
            # Short-term window: last 'window' samples (or fewer).
            win_size = min(data_t.size, table["window"])
            win_first_idx = data_t.size - win_size
            key_14 = data_t.keys()[win_first_idx]
            long_win_size = min(data_t.size, table["long-trend-window"])
            median_t = data_t.rolling(window=win_size, min_periods=2).median()
            stdev_t = data_t.rolling(window=win_size, min_periods=2).std()
            median_first_idx = median_t.size - long_win_size
            # Long-term reference: medians before the short-term window
            # (elided lines presumably reduce this to 'max_median' — TODO
            # confirm).
            [x for x in median_t.values[median_first_idx:-win_size]
            last_median_t = median_t[last_key]
            median_t_14 = median_t[key_14]

            # Classification list:
            # Each sample is classified against the rolling median +/- 3
            # rolling stdev band; NaN statistics mark the sample an outlier.
            classification_lst = list()
            for build_nr, value in data_t.iteritems():
                if isnan(median_t[build_nr]) \
                        or isnan(stdev_t[build_nr]) \
                    classification_lst.append("outlier")
                elif value < (median_t[build_nr] - 3 * stdev_t[build_nr]):
                    classification_lst.append("regression")
                elif value > (median_t[build_nr] + 3 * stdev_t[build_nr]):
                    classification_lst.append("progression")
                    classification_lst.append("normal")

            # Short-term change: last median vs median at window start, in %.
            if isnan(last_median_t) or isnan(median_t_14) or median_t_14 == 0.0:
                rel_change_last = nan
            rel_change_last = round(
                ((last_median_t - median_t_14) / median_t_14) * 100, 2)

            # Long-term change: last median vs long-term maximum median, in %.
            if isnan(max_median) or isnan(last_median_t) or max_median == 0.0:
                rel_change_long = nan
            rel_change_long = round(
                ((last_median_t - max_median) / max_median) * 100, 2)

            # Row: name, last median [Mpps], short/long-term change [%],
            # regression/progression/outlier counts within the window.
            [tbl_dict[tst_name]["name"],
             '-' if isnan(last_median_t) else
             round(last_median_t / 1000000, 2),
             '-' if isnan(rel_change_last) else rel_change_last,
             '-' if isnan(rel_change_long) else rel_change_long,
             classification_lst[win_first_idx:].count("regression"),
             classification_lst[win_first_idx:].count("progression"),
             classification_lst[win_first_idx:].count("outlier")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Stable multi-key ordering: regressions desc, then progressions desc,
    # then outliers desc, ties broken by short-term change asc.
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_pro = [item for item in tbl_reg if item[5] == nrp]
            for nro in range(table["window"], -1, -1):
                tbl_out = [item for item in tbl_pro if item[6] == nro]
                tbl_out.sort(key=lambda rel: rel[2])
                tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info(" Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    # Render the csv also as an ASCII table (.txt) via prettytable.
    txt_file_name = "{0}.txt".format(table["output-file"])

    logging.info(" Writing file: '{0}'".format(txt_file_name))
    with open(file_name, 'rb') as csv_file:
        csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
        for row in csv_content:
            # First row becomes the prettytable header.
            if txt_table is None:
                txt_table = prettytable.PrettyTable(row)
            txt_table.add_row(row)
        txt_table.align["Test case"] = "l"
    with open(txt_file_name, "w") as txt_file:
        txt_file.write(str(txt_table))
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # NOTE(review): lines are elided in this copy ('try:' statements, the
    # leading 'if' of several 'elif' chains, the text assignments for the
    # th/td/a elements, and the url/anchor/feature initialisations).
    # Comments describe only the visible code.

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the dashboard csv produced by table_performance_trending_dashboard.
    with open(table["input-file"], 'rb') as csv_file:
        csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
        csv_lst = [item for item in csv_content]
    logging.warning("The input file is not defined.")
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))

    # Root element of the generated HTML table.
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row from the first csv row, with a highlighted background.
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))

    # Per-classification row colors; each tuple holds two shades that are
    # alternated between even and odd rows.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "outlier": ("#e6e6e6", "#cccccc"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        # Classification of the row (elided conditions above presumably pick
        # regression/outlier/normal — TODO confirm).
        color = "progression"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Map the test name (first column) to the trending page the cell
            # should link to; 'feature' adds a "-features" page suffix.
            file_name = "container_memif.html"
            elif "vhost" in item:
                if "l2xcbase" in item or "l2bdbasemaclrn" in item:
                    file_name = "vm_vhost_l2.html"
                elif "ip4base" in item:
                    file_name = "vm_vhost_ip4.html"
            elif "ipsec" in item:
                file_name = "ipsec.html"
            elif "ethip4lispip" in item or "ethip4vxlan" in item:
                file_name = "ip4_tunnels.html"
            elif "ip4base" in item or "ip4scale" in item:
                file_name = "ip4.html"
                if "iacl" in item or "snat" in item or "cop" in item:
                    feature = "-features"
            elif "ip6base" in item or "ip6scale" in item:
                file_name = "ip6.html"
            elif "l2xcbase" in item or "l2xcscale" in item \
                    or "l2bdbasemaclrn" in item or "l2bdscale" in item \
                    or "l2dbbasemaclrn" in item or "l2dbscale" in item:
                file_name = "l2.html"
                feature = "-features"
            # NIC / frame-size branches (bodies elided).
            elif "xl710" in item:
            elif "9000b" in item:

            # Final link target: base url + page + anchor + feature suffix.
            url = url + file_name + anchor + feature
            ref = ET.SubElement(td, "a", attrib=dict(href=url))

    # Write the table wrapped in an rST "raw html" directive.
    with open(table["output-file"], 'w') as html_file:
        logging.info(" Writing file: '{0}'".
                     format(table["output-file"]))
        html_file.write(".. raw:: html\n\n\t")
        html_file.write(ET.tostring(dashboard))
        html_file.write("\n\t<p><br><br></p>\n")
    logging.warning("The output file is not defined.")