1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
23 from string import replace
24 from collections import OrderedDict
25 from numpy import nan, isnan
26 from xml.etree import ElementTree as ET
28 from errors import PresentationError
29 from utils import mean, stdev, relative_change, remove_outliers, split_outliers
# NOTE(review): this listing is a line-numbered paste with interior lines
# elided (the pasted numbers jump); code lines below are kept verbatim and
# only comments are added.
32 def generate_tables(spec, data):
33     """Generate all tables specified in the specification file.
35     :param spec: Specification read from the specification file.
36     :param data: Data to process.
37     :type spec: Specification
# Dispatch: each table entry names its generator function in
# table["algorithm"]; the named function is looked up and called with
# (table, data).
41     logging.info("Generating the tables ...")
42     for table in spec.tables:
# SECURITY NOTE(review): eval() executes an arbitrary expression taken from
# the specification file; a dict mapping algorithm names to the table_*
# functions would be safer. Presumably wrapped in the elided try/except
# whose error branch is the logging.error below — TODO confirm.
44         eval(table["algorithm"])(table, data)
46             logging.error("The algorithm '{0}' is not defined.".
47                           format(table["algorithm"]))
# NOTE(review): line-numbered, elided listing — code kept verbatim, comments
# only. Initializations of `header`, `row_lst` and `table_lst` are elided.
51 def table_details(table, input_data):
52 """Generate the table(s) with algorithm: table_detailed_test_results
53 specified in the specification file.
55 :param table: Table to generate.
56 :param input_data: Data to process.
57 :type table: pandas.Series
58 :type input_data: InputData
61 logging.info(" Generating the table {0} ...".
62 format(table.get("title", "")))
65 data = input_data.filter_data(table)
67 # Prepare the header of the tables
# CSV quoting: embedded double quotes are doubled and the cell is wrapped
# in quotes.
69 for column in table["columns"]:
70 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
72 # Generate the data for the table according to the model in the table
# Python 2 idiom: dict.keys()[0] — only the first job/build pair is used.
74 job = table["data"].keys()[0]
75 build = str(table["data"][job][0])
77 suites = input_data.suites(job, build)
79 logging.error(" No data available. The table will not be generated.")
# One output file is produced per suite; tests are matched to a suite by
# substring test ("parent" in suite_name).
82 for suite_longname, suite in suites.iteritems():
84 suite_name = suite["name"]
86 for test in data[job][build].keys():
87 if data[job][build][test]["parent"] in suite_name:
89 for column in table["columns"]:
# column["data"] is "<cmd> <field>"; the field after the space indexes the
# test record.
91 col_data = str(data[job][build][test][column["data"].
92 split(" ")[1]]).replace('"', '""')
93 if column["data"].split(" ")[1] in ("vat-history",
# string.replace (Python 2 function form) strips " |br| " markers before
# wrapping in |prein|/|preout| directives.
95 col_data = replace(col_data, " |br| ", "",
97 col_data = " |prein| {0} |preout| ".\
99 row_lst.append('"{0}"'.format(col_data))
101 row_lst.append("No data")
102 table_lst.append(row_lst)
104 # Write the data to file
106 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
107 table["output-file-ext"])
108 logging.info(" Writing file: '{}'".format(file_name))
109 with open(file_name, "w") as file_handler:
110 file_handler.write(",".join(header) + "\n")
111 for item in table_lst:
112 file_handler.write(",".join(item) + "\n")
114 logging.info(" Done.")
# NOTE(review): line-numbered, elided listing — code kept verbatim, comments
# only. Same structure as table_details but operating on data merged across
# jobs/builds, so tests are indexed directly (data[test]) instead of
# data[job][build][test].
117 def table_merged_details(table, input_data):
118 """Generate the table(s) with algorithm: table_merged_details
119 specified in the specification file.
121 :param table: Table to generate.
122 :param input_data: Data to process.
123 :type table: pandas.Series
124 :type input_data: InputData
127 logging.info(" Generating the table {0} ...".
128 format(table.get("title", "")))
# Filter, merge and sort the test data; do the same for the suite metadata.
131 data = input_data.filter_data(table)
132 data = input_data.merge_data(data)
133 data.sort_index(inplace=True)
135 suites = input_data.filter_data(table, data_set="suites")
136 suites = input_data.merge_data(suites)
138 # Prepare the header of the tables
140 for column in table["columns"]:
141 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
# One output file per suite, same matching and CSV-quoting rules as
# table_details.
143 for _, suite in suites.iteritems():
145 suite_name = suite["name"]
147 for test in data.keys():
148 if data[test]["parent"] in suite_name:
150 for column in table["columns"]:
152 col_data = str(data[test][column["data"].
153 split(" ")[1]]).replace('"', '""')
154 if column["data"].split(" ")[1] in ("vat-history",
156 col_data = replace(col_data, " |br| ", "",
# col_data[:-5] drops the trailing 5 characters — presumably a leftover
# " |br|" marker; TODO confirm against the elided replace() arguments.
158 col_data = " |prein| {0} |preout| ".\
159 format(col_data[:-5])
160 row_lst.append('"{0}"'.format(col_data))
162 row_lst.append("No data")
163 table_lst.append(row_lst)
165 # Write the data to file
167 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
168 table["output-file-ext"])
169 logging.info(" Writing file: '{}'".format(file_name))
170 with open(file_name, "w") as file_handler:
171 file_handler.write(",".join(header) + "\n")
172 for item in table_lst:
173 file_handler.write(",".join(item) + "\n")
175 logging.info(" Done.")
# NOTE(review): line-numbered, elided listing — code kept verbatim, comments
# only.
178 def table_performance_improvements(table, input_data):
179 """Generate the table(s) with algorithm: table_performance_improvements
180 specified in the specification file.
182 :param table: Table to generate.
183 :param input_data: Data to process.
184 :type table: pandas.Series
185 :type input_data: InputData
# Local helper: serialize one table row (list of {"data": ...} cells) as a
# CSV line.
188 def _write_line_to_file(file_handler, data):
189 """Write a line to the .csv file.
191 :param file_handler: File handler for the csv file. It must be open for
193 :param data: Item to be written to the file.
194 :type file_handler: BinaryIO
200 if isinstance(item["data"], str):
201 # Remove -?drdisc from the end
# Slices off 8 characters, i.e. a "-ndrdisc"/"-pdrdisc" style suffix, even
# though the test only checks the last 6 ("drdisc").
202 if item["data"].endswith("drdisc"):
203 item["data"] = item["data"][:-8]
204 line_lst.append(item["data"])
205 elif isinstance(item["data"], float):
206 line_lst.append("{:.1f}".format(item["data"]))
207 elif item["data"] is None:
209 file_handler.write(",".join(line_lst) + "\n")
211 logging.info(" Generating the table {0} ...".
212 format(table.get("title", "")))
# Read the CSV template; skip the whole table if it is missing or unreadable.
215 file_name = table.get("template", None)
218 tmpl = _read_csv_template(file_name)
219 except PresentationError:
220 logging.error(" The template '{0}' does not exist. Skipping the "
221 "table.".format(file_name))
224 logging.error("The template is not defined. Skipping the table.")
228 data = input_data.filter_data(table)
230 # Prepare the header of the tables
232 for column in table["columns"]:
233 header.append(column["title"])
235 # Generate the data for the table according to the model in the table
# Each column["data"] is "<cmd> <args...>"; supported cmds visible here:
# "template" (copy a template cell), a data-aggregation branch (collect
# throughput values, then apply an operation), and "operation" (combine two
# earlier cells).
238 for tmpl_item in tmpl:
240 for column in table["columns"]:
241 cmd = column["data"].split(" ")[0]
242 args = column["data"].split(" ")[1:]
243 if cmd == "template":
245 val = float(tmpl_item[int(args[0])])
247 val = tmpl_item[int(args[0])]
248 tbl_item.append({"data": val})
254 for build in data[job]:
256 data_lst.append(float(build[tmpl_item[0]]
257 ["throughput"]["value"]))
258 except (KeyError, TypeError):
# SECURITY NOTE(review): eval() on an operation name from the spec file;
# a whitelist/dispatch dict would be safer.
262 tbl_item.append({"data": (eval(operation)(data_lst)) /
265 tbl_item.append({"data": None})
266 elif cmd == "operation":
269 nr1 = float(tbl_item[int(args[1])]["data"])
270 nr2 = float(tbl_item[int(args[2])]["data"])
272 tbl_item.append({"data": eval(operation)(nr1, nr2)})
274 tbl_item.append({"data": None})
275 except (IndexError, ValueError, TypeError):
276 logging.error("No data for {0}".format(tbl_item[0]["data"]))
277 tbl_item.append({"data": None})
280 logging.error("Not supported command {0}. Skipping the table.".
283 tbl_lst.append(tbl_item)
285 # Sort the table according to the relative change
# Descending by the last cell (the relative change computed above).
286 tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)
288 # Create the tables and write them to the files
290 "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
291 "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
292 "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
293 "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])
296 for file_name in file_names:
297 logging.info(" Writing the file '{0}'".format(file_name))
298 with open(file_name, "w") as file_handler:
299 file_handler.write(",".join(header) + "\n")
301 if isinstance(item[-1]["data"], float):
302 rel_change = round(item[-1]["data"], 1)
304 rel_change = item[-1]["data"]
# Routing: rows go to *_top files when relative change >= 10.0, to *_low
# otherwise; "ndr"/"pdr" in the test name selects the ndr/pdr file.
305 if "ndr_top" in file_name \
306 and "ndr" in item[0]["data"] \
307 and rel_change >= 10.0:
308 _write_line_to_file(file_handler, item)
309 elif "pdr_top" in file_name \
310 and "pdr" in item[0]["data"] \
311 and rel_change >= 10.0:
312 _write_line_to_file(file_handler, item)
313 elif "ndr_low" in file_name \
314 and "ndr" in item[0]["data"] \
315 and rel_change < 10.0:
316 _write_line_to_file(file_handler, item)
317 elif "pdr_low" in file_name \
318 and "pdr" in item[0]["data"] \
319 and rel_change < 10.0:
320 _write_line_to_file(file_handler, item)
322 logging.info(" Done.")
# NOTE(review): line-numbered, elided listing — code kept verbatim, comments
# only. The `return tmpl_data` (and the tmpl_data initialization) are among
# the elided lines — TODO confirm.
325 def _read_csv_template(file_name):
326 """Read the template from a .csv file.
328 :param file_name: Name / full path / relative path of the file to read.
330 :returns: Data from the template as list (lines) of lists (items on line).
332 :raises: PresentationError if it is not possible to read the file.
# line[:-1] drops the trailing newline; naive comma split (no quoting
# support), adequate for the simple templates this expects.
336 with open(file_name, 'r') as csv_file:
338 for line in csv_file:
339 tmpl_data.append(line[:-1].split(","))
# I/O failures are converted to the project-level PresentationError.
341 except IOError as err:
342 raise PresentationError(str(err), level="ERROR")
# NOTE(review): line-numbered, elided listing — code kept verbatim, comments
# only.
345 def table_performance_comparison(table, input_data):
346 """Generate the table(s) with algorithm: table_performance_comparison
347 specified in the specification file.
349 :param table: Table to generate.
350 :param input_data: Data to process.
351 :type table: pandas.Series
352 :type input_data: InputData
355 logging.info(" Generating the table {0} ...".
356 format(table.get("title", "")))
359 data = input_data.filter_data(table, continue_on_error=True)
361 # Prepare the header of the tables
# Optional "history" columns (Throughput/Stdev pairs per historical item)
# precede the reference/compare columns.
363 header = ["Test case", ]
365 history = table.get("history", None)
369 ["{0} Throughput [Mpps]".format(item["title"]),
370 "{0} Stdev [Mpps]".format(item["title"])])
372 ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
373 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
374 "{0} Throughput [Mpps]".format(table["compare"]["title"]),
375 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
377 header_str = ",".join(header) + "\n"
378 except (AttributeError, KeyError) as err:
379 logging.error("The model is invalid, missing parameter: {0}".
383 # Prepare data to the table:
# Pass 1: collect reference-set throughput samples per test into
# tbl_dict[tst_name]["ref-data"]; test display name is built from the
# parent's first dash-token plus the test name.
385 for job, builds in table["reference"]["data"].items():
387 for tst_name, tst_data in data[job][str(build)].iteritems():
388 if tbl_dict.get(tst_name, None) is None:
389 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
390 "-".join(tst_data["name"].
392 tbl_dict[tst_name] = {"name": name,
396 tbl_dict[tst_name]["ref-data"].\
397 append(tst_data["throughput"]["value"])
399 pass # No data in output.xml for this test
# Pass 2: collect compare-set samples; tests missing from tbl_dict are
# dropped (pop in the elided error branch).
401 for job, builds in table["compare"]["data"].items():
403 for tst_name, tst_data in data[job][str(build)].iteritems():
405 tbl_dict[tst_name]["cmp-data"].\
406 append(tst_data["throughput"]["value"])
410 tbl_dict.pop(tst_name, None)
# Pass 3 (optional): collect per-title history samples, keyed in insertion
# order via OrderedDict.
413 for job, builds in item["data"].items():
415 for tst_name, tst_data in data[job][str(build)].iteritems():
416 if tbl_dict.get(tst_name, None) is None:
418 if tbl_dict[tst_name].get("history", None) is None:
419 tbl_dict[tst_name]["history"] = OrderedDict()
420 if tbl_dict[tst_name]["history"].get(item["title"],
422 tbl_dict[tst_name]["history"][item["title"]] = \
425 tbl_dict[tst_name]["history"][item["title"]].\
426 append(tst_data["throughput"]["value"])
427 except (TypeError, KeyError):
# Build rows: [name, (hist mean, hist stdev)..., ref mean, ref stdev,
# cmp mean, cmp stdev, relative change]; throughputs converted from pps to
# Mpps (/ 1e6), outliers removed per table["outlier-const"].
431 for tst_name in tbl_dict.keys():
432 item = [tbl_dict[tst_name]["name"], ]
434 if tbl_dict[tst_name].get("history", None) is not None:
435 for hist_data in tbl_dict[tst_name]["history"].values():
437 data_t = remove_outliers(
438 hist_data, outlier_const=table["outlier-const"])
440 item.append(round(mean(data_t) / 1000000, 2))
441 item.append(round(stdev(data_t) / 1000000, 2))
443 item.extend([None, None])
445 item.extend([None, None])
447 item.extend([None, None])
448 if tbl_dict[tst_name]["ref-data"]:
449 data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
450 outlier_const=table["outlier-const"])
451 # TODO: Specify window size.
453 item.append(round(mean(data_t) / 1000000, 2))
454 item.append(round(stdev(data_t) / 1000000, 2))
456 item.extend([None, None])
458 item.extend([None, None])
459 if tbl_dict[tst_name]["cmp-data"]:
460 data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
461 outlier_const=table["outlier-const"])
462 # TODO: Specify window size.
464 item.append(round(mean(data_t) / 1000000, 2))
465 item.append(round(stdev(data_t) / 1000000, 2))
467 item.extend([None, None])
469 item.extend([None, None])
# item[-4]=ref mean, item[-2]=cmp mean at this point; append relative
# change only when both exist and ref is non-zero. Rows are kept only when
# complete (len(item) == len(header)).
470 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
471 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
472 if len(item) == len(header):
475 # Sort the table according to the relative change
476 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
# Fan out to six per-(ndr|pdr)-per-core-count CSV files; each row is written
# only to the file matching its test name.
480 tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
481 table["output-file-ext"]),
482 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
483 table["output-file-ext"]),
484 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
485 table["output-file-ext"]),
486 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
487 table["output-file-ext"]),
488 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
489 table["output-file-ext"]),
490 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
491 table["output-file-ext"])
493 for file_name in tbl_names:
494 logging.info(" Writing file: '{0}'".format(file_name))
495 with open(file_name, "w") as file_handler:
496 file_handler.write(header_str)
498 if (file_name.split("-")[-3] in test[0] and # NDR vs PDR
499 file_name.split("-")[-2] in test[0]): # cores
# Strip the last dash-token (the core-count suffix) from the displayed name.
500 test[0] = "-".join(test[0].split("-")[:-1])
501 file_handler.write(",".join([str(item) for item in test]) +
# Render each CSV into an aligned .txt table via prettytable.
505 tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
506 "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
507 "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
508 "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
509 "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
510 "{0}-pdr-4t4c-full.txt".format(table["output-file"])
513 for i, txt_name in enumerate(tbl_names_txt):
515 logging.info(" Writing file: '{0}'".format(txt_name))
516 with open(tbl_names[i], 'rb') as csv_file:
517 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
518 for row in csv_content:
519 if txt_table is None:
# First CSV row becomes the prettytable header.
520 txt_table = prettytable.PrettyTable(row)
522 txt_table.add_row(row)
523 txt_table.align["Test case"] = "l"
524 with open(txt_name, "w") as txt_file:
525 txt_file.write(str(txt_table))
527 # Selected tests in csv:
# Re-read the sorted full 1t1c files and emit "-top" (first N rows) and
# "-bottom" (last N rows, reversed, excluding the header via [-1:0:-1])
# selections, N = table["nr-of-tests-shown"].
528 input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
529 table["output-file-ext"])
530 with open(input_file, "r") as in_file:
535 output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
536 table["output-file-ext"])
537 logging.info(" Writing file: '{0}'".format(output_file))
538 with open(output_file, "w") as out_file:
539 out_file.write(header_str)
540 for i, line in enumerate(lines[1:]):
541 if i == table["nr-of-tests-shown"]:
545 output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
546 table["output-file-ext"])
547 logging.info(" Writing file: '{0}'".format(output_file))
548 with open(output_file, "w") as out_file:
549 out_file.write(header_str)
550 for i, line in enumerate(lines[-1:0:-1]):
551 if i == table["nr-of-tests-shown"]:
555 input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
556 table["output-file-ext"])
557 with open(input_file, "r") as in_file:
562 output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
563 table["output-file-ext"])
564 logging.info(" Writing file: '{0}'".format(output_file))
565 with open(output_file, "w") as out_file:
566 out_file.write(header_str)
567 for i, line in enumerate(lines[1:]):
568 if i == table["nr-of-tests-shown"]:
572 output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
573 table["output-file-ext"])
574 logging.info(" Writing file: '{0}'".format(output_file))
575 with open(output_file, "w") as out_file:
576 out_file.write(header_str)
577 for i, line in enumerate(lines[-1:0:-1]):
578 if i == table["nr-of-tests-shown"]:
# NOTE(review): line-numbered, elided listing — code kept verbatim, comments
# only. MRR variant of table_performance_comparison: samples come from
# tst_data["result"]["throughput"] and there are no history columns; files
# are split by core count only (no ndr/pdr split).
583 def table_performance_comparison_mrr(table, input_data):
584 """Generate the table(s) with algorithm: table_performance_comparison_mrr
585 specified in the specification file.
587 :param table: Table to generate.
588 :param input_data: Data to process.
589 :type table: pandas.Series
590 :type input_data: InputData
593 logging.info(" Generating the table {0} ...".
594 format(table.get("title", "")))
597 data = input_data.filter_data(table, continue_on_error=True)
599 # Prepare the header of the tables
601 header = ["Test case",
602 "{0} Throughput [Mpps]".format(table["reference"]["title"]),
603 "{0} stdev [Mpps]".format(table["reference"]["title"]),
604 "{0} Throughput [Mpps]".format(table["compare"]["title"]),
605 "{0} stdev [Mpps]".format(table["compare"]["title"]),
607 header_str = ",".join(header) + "\n"
608 except (AttributeError, KeyError) as err:
609 logging.error("The model is invalid, missing parameter: {0}".
613 # Prepare data to the table:
# Pass 1: reference samples per test.
615 for job, builds in table["reference"]["data"].items():
617 for tst_name, tst_data in data[job][str(build)].iteritems():
618 if tbl_dict.get(tst_name, None) is None:
619 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
620 "-".join(tst_data["name"].
622 tbl_dict[tst_name] = {"name": name,
626 tbl_dict[tst_name]["ref-data"].\
627 append(tst_data["result"]["throughput"])
629 pass # No data in output.xml for this test
# Pass 2: compare samples; unmatched tests are dropped.
631 for job, builds in table["compare"]["data"].items():
633 for tst_name, tst_data in data[job][str(build)].iteritems():
635 tbl_dict[tst_name]["cmp-data"].\
636 append(tst_data["result"]["throughput"])
640 tbl_dict.pop(tst_name, None)
# Build rows: [name, ref mean, ref stdev, cmp mean, cmp stdev, rel change];
# values converted to Mpps, outliers removed per table["outlier-const"].
643 for tst_name in tbl_dict.keys():
644 item = [tbl_dict[tst_name]["name"], ]
645 if tbl_dict[tst_name]["ref-data"]:
646 data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
647 outlier_const=table["outlier-const"])
648 # TODO: Specify window size.
650 item.append(round(mean(data_t) / 1000000, 2))
651 item.append(round(stdev(data_t) / 1000000, 2))
653 item.extend([None, None])
655 item.extend([None, None])
656 if tbl_dict[tst_name]["cmp-data"]:
657 data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
658 outlier_const=table["outlier-const"])
659 # TODO: Specify window size.
661 item.append(round(mean(data_t) / 1000000, 2))
662 item.append(round(stdev(data_t) / 1000000, 2))
664 item.extend([None, None])
666 item.extend([None, None])
# item[1]=ref mean, item[3]=cmp mean; relative change appended only when
# both present and ref non-zero.
667 if item[1] is not None and item[3] is not None and item[1] != 0:
668 item.append(int(relative_change(float(item[1]), float(item[3]))))
672 # Sort the table according to the relative change
673 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
# Fan out to per-core-count CSV files, stripping the core suffix from the
# displayed test name.
677 tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
678 table["output-file-ext"]),
679 "{0}-2t2c-full{1}".format(table["output-file"],
680 table["output-file-ext"]),
681 "{0}-4t4c-full{1}".format(table["output-file"],
682 table["output-file-ext"])
684 for file_name in tbl_names:
685 logging.info(" Writing file: '{0}'".format(file_name))
686 with open(file_name, "w") as file_handler:
687 file_handler.write(header_str)
689 if file_name.split("-")[-2] in test[0]: # cores
690 test[0] = "-".join(test[0].split("-")[:-1])
691 file_handler.write(",".join([str(item) for item in test]) +
# Render each CSV into an aligned .txt table via prettytable.
695 tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
696 "{0}-2t2c-full.txt".format(table["output-file"]),
697 "{0}-4t4c-full.txt".format(table["output-file"])
700 for i, txt_name in enumerate(tbl_names_txt):
702 logging.info(" Writing file: '{0}'".format(txt_name))
703 with open(tbl_names[i], 'rb') as csv_file:
704 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
705 for row in csv_content:
706 if txt_table is None:
707 txt_table = prettytable.PrettyTable(row)
709 txt_table.add_row(row)
710 txt_table.align["Test case"] = "l"
711 with open(txt_name, "w") as txt_file:
712 txt_file.write(str(txt_table))
# NOTE(review): line-numbered, elided listing — code kept verbatim, comments
# only.
715 def table_performance_trending_dashboard(table, input_data):
716 """Generate the table(s) with algorithm: table_performance_comparison
717 specified in the specification file.
719 :param table: Table to generate.
720 :param input_data: Data to process.
721 :type table: pandas.Series
722 :type input_data: InputData
725 logging.info(" Generating the table {0} ...".
726 format(table.get("title", "")))
729 data = input_data.filter_data(table, continue_on_error=True)
731 # Prepare the header of the tables
732 header = ["Test Case",
734 "Short-Term Change [%]",
735 "Long-Term Change [%]",
740 header_str = ",".join(header) + "\n"
742 # Prepare data to the table:
# Collect per-test throughput samples keyed by build number (OrderedDict
# preserves build order); tests on the ignore-list are skipped.
744 for job, builds in table["data"].items():
746 for tst_name, tst_data in data[job][str(build)].iteritems():
747 if tst_name.lower() in table["ignore-list"]:
749 if tbl_dict.get(tst_name, None) is None:
750 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
751 "-".join(tst_data["name"].
753 tbl_dict[tst_name] = {"name": name,
754 "data": OrderedDict()}
756 tbl_dict[tst_name]["data"][str(build)] = \
757 tst_data["result"]["throughput"]
758 except (TypeError, KeyError):
759 pass # No data in output.xml for this test
# Trend analysis per test (needs > 2 samples): split off outliers, then
# compute rolling median and stdev over the short window.
762 for tst_name in tbl_dict.keys():
763 if len(tbl_dict[tst_name]["data"]) > 2:
765 pd_data = pd.Series(tbl_dict[tst_name]["data"])
766 data_t, _ = split_outliers(pd_data, outlier_const=1.5,
767 window=table["window"])
768 last_key = data_t.keys()[-1]
769 win_size = min(data_t.size, table["window"])
770 win_first_idx = data_t.size - win_size
# key_14: the build at the start of the short-term window; used as the
# short-term comparison point.
771 key_14 = data_t.keys()[win_first_idx]
772 long_win_size = min(data_t.size, table["long-trend-window"])
773 median_t = data_t.rolling(window=win_size, min_periods=2).median()
774 stdev_t = data_t.rolling(window=win_size, min_periods=2).std()
775 median_first_idx = median_t.size - long_win_size
# Long-term reference: presumably max of the long-window medians excluding
# the short window (elided lines 776-790) — TODO confirm.
778 [x for x in median_t.values[median_first_idx:-win_size]
783 last_median_t = median_t[last_key]
787 median_t_14 = median_t[key_14]
791 # Classification list:
# Each sample is classified against the rolling median +/- 3 stdev:
# outside the band => regression/progression, NaN stats => outlier.
792 classification_lst = list()
793 for build_nr, value in data_t.iteritems():
794 if isnan(median_t[build_nr]) \
795 or isnan(stdev_t[build_nr]) \
797 classification_lst.append("outlier")
798 elif value < (median_t[build_nr] - 3 * stdev_t[build_nr]):
799 classification_lst.append("regression")
800 elif value > (median_t[build_nr] + 3 * stdev_t[build_nr]):
801 classification_lst.append("progression")
803 classification_lst.append("normal")
# Short-term change: last median vs window-start median, in percent.
805 if isnan(last_median_t) or isnan(median_t_14) or median_t_14 == 0.0:
806 rel_change_last = nan
808 rel_change_last = round(
809 ((last_median_t - median_t_14) / median_t_14) * 100, 2)
# Long-term change: last median vs the long-term max median, in percent.
811 if isnan(max_median) or isnan(last_median_t) or max_median == 0.0:
812 rel_change_long = nan
814 rel_change_long = round(
815 ((last_median_t - max_median) / max_median) * 100, 2)
# Row: name, last median [Mpps], short/long-term change, and counts of
# regressions/progressions/outliers within the short window.
818 [tbl_dict[tst_name]["name"],
819 '-' if isnan(last_median_t) else
820 round(last_median_t / 1000000, 2),
821 '-' if isnan(rel_change_last) else rel_change_last,
822 '-' if isnan(rel_change_long) else rel_change_long,
823 classification_lst[win_first_idx:].count("regression"),
824 classification_lst[win_first_idx:].count("progression"),
825 classification_lst[win_first_idx:].count("outlier")])
827 tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-key ordering: descending regression count, then progression
# count, then outlier count, with short-term change as tiebreaker.
830 for nrr in range(table["window"], -1, -1):
831 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
832 for nrp in range(table["window"], -1, -1):
833 tbl_pro = [item for item in tbl_reg if item[5] == nrp]
834 for nro in range(table["window"], -1, -1):
835 tbl_out = [item for item in tbl_pro if item[6] == nro]
836 tbl_out.sort(key=lambda rel: rel[2])
837 tbl_sorted.extend(tbl_out)
839 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
841 logging.info(" Writing file: '{0}'".format(file_name))
842 with open(file_name, "w") as file_handler:
843 file_handler.write(header_str)
844 for test in tbl_sorted:
845 file_handler.write(",".join([str(item) for item in test]) + '\n')
# Also render the CSV into an aligned .txt table via prettytable.
847 txt_file_name = "{0}.txt".format(table["output-file"])
849 logging.info(" Writing file: '{0}'".format(txt_file_name))
850 with open(file_name, 'rb') as csv_file:
851 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
852 for row in csv_content:
853 if txt_table is None:
854 txt_table = prettytable.PrettyTable(row)
856 txt_table.add_row(row)
857 txt_table.align["Test case"] = "l"
858 with open(txt_file_name, "w") as txt_file:
859 txt_file.write(str(txt_table))
# NOTE(review): line-numbered, elided listing — code kept verbatim, comments
# only.
862 def table_performance_trending_dashboard_html(table, input_data):
863 """Generate the table(s) with algorithm:
864 table_performance_trending_dashboard_html specified in the specification
867 :param table: Table to generate.
868 :param input_data: Data to process.
869 :type table: pandas.Series
870 :type input_data: InputData
873 logging.info(" Generating the table {0} ...".
874 format(table.get("title", "")))
# Load the previously generated dashboard CSV; bail out with a warning when
# the input is missing or unparsable.
877 with open(table["input-file"], 'rb') as csv_file:
878 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
879 csv_lst = [item for item in csv_content]
881 logging.warning("The input file is not defined.")
883 except csv.Error as err:
884 logging.warning("Not possible to process the file '{0}'.\n{1}".
885 format(table["input-file"], err))
# Build the HTML table with ElementTree; first CSV row becomes the header
# row (left-aligned first column, centered rest).
889 dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
892 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
893 for idx, item in enumerate(csv_lst[0]):
894 alignment = "left" if idx == 0 else "center"
895 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Row background alternates between the two shades of the row's
# classification color (regression/progression/outlier/normal).
899 colors = {"regression": ("#ffcccc", "#ff9999"),
900 "progression": ("#c6ecc6", "#9fdf9f"),
901 "outlier": ("#e6e6e6", "#cccccc"),
902 "normal": ("#e9f1fb", "#d4e4f7")}
903 for r_idx, row in enumerate(csv_lst[1:]):
907 color = "progression"
912 background = colors[color][r_idx % 2]
913 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
916 for c_idx, item in enumerate(row):
917 alignment = "left" if c_idx == 0 else "center"
918 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# Map test-name substrings to the documentation page (file_name), and to
# optional "-features" suffix / NIC / frame-size anchors used to build the
# hyperlink target.
926 file_name = "container_memif.html"
928 elif "vhost" in item:
929 if "l2xcbase" in item or "l2bdbasemaclrn" in item:
930 file_name = "vm_vhost_l2.html"
931 elif "ip4base" in item:
932 file_name = "vm_vhost_ip4.html"
934 elif "ipsec" in item:
935 file_name = "ipsec.html"
937 elif "ethip4lispip" in item or "ethip4vxlan" in item:
938 file_name = "ip4_tunnels.html"
940 elif "ip4base" in item or "ip4scale" in item:
941 file_name = "ip4.html"
942 if "iacl" in item or "snat" in item or "cop" in item:
943 feature = "-features"
945 elif "ip6base" in item or "ip6scale" in item:
946 file_name = "ip6.html"
948 elif "l2xcbase" in item or "l2xcscale" in item \
949 or "l2bdbasemaclrn" in item or "l2bdscale" in item \
950 or "l2dbbasemaclrn" in item or "l2dbscale" in item:
951 file_name = "l2.html"
953 feature = "-features"
959 elif "xl710" in item:
968 elif "9000b" in item:
# Final link target = base url + page + anchor + feature suffix; the test
# name cell becomes an <a href=...> element.
980 url = url + file_name + anchor + feature
982 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Emit as an rST ".. raw:: html" block so Sphinx includes it verbatim.
989 with open(table["output-file"], 'w') as html_file:
990 logging.info(" Writing file: '{0}'".
991 format(table["output-file"]))
992 html_file.write(".. raw:: html\n\n\t")
993 html_file.write(ET.tostring(dashboard))
994 html_file.write("\n\t<p><br><br></p>\n")
996 logging.warning("The output file is not defined.")