# Copyright (c) 2017 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Algorithms to generate tables.
"""

import logging
import csv
import prettytable
import pandas as pd

from string import replace
from math import isnan
from collections import OrderedDict
from numpy import nan
from xml.etree import ElementTree as ET

from errors import PresentationError
from utils import mean, stdev, relative_change, remove_outliers, split_outliers


def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        try:
            eval(table["algorithm"])(table, data)
        except NameError:
            logging.error("The algorithm '{0}' is not defined.".
                          format(table["algorithm"]))
    logging.info("Done.")
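

# NOTE: A table entry in the specification file is dispatched above by its
# "algorithm" key, which must name one of the functions in this module. The
# sketch below only illustrates the keys these algorithms read; the real
# structure is defined by the specification YAML and the Specification class,
# and every value shown is hypothetical:
#
#   - type: "table"
#     title: "Detailed Test Results"
#     algorithm: "table_details"
#     output-file-ext: ".csv"
#     output-file: "path/to/detailed_results"
#     columns:
#       - title: "Name"
#         data: "data name"
#     data: {"hypothetical-jenkins-job": [1]}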


def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("  No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.iteritems():
        # Generate one table per suite
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("    Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
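

# NOTE on the column model used above: each column specification carries a
# "data" string whose second token (column["data"].split(" ")[1]) names the
# field to read from the test record; a hypothetical 'data: "data msg"' would
# select the "msg" field. The "vat-history" and "show-run" fields are wrapped
# in the |prein| / |preout| rST substitutions so they render as preformatted
# text.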


def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform and merge the data
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.iteritems():
        # Generate one table per suite
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("    Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")


def table_performance_improvements(table, input_data):
    """Generate the table(s) with algorithm: table_performance_improvements
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    def _write_line_to_file(file_handler, data):
        """Write a line to the .csv file.

        :param file_handler: File handler for the csv file. It must be open for
            writing text.
        :param data: Item to be written to the file.
        :type file_handler: BinaryIO
        :type data: list
        """

        line_lst = list()
        for item in data:
            if isinstance(item["data"], str):
                # Remove -?drdisc from the end
                if item["data"].endswith("drdisc"):
                    item["data"] = item["data"][:-8]
                line_lst.append(item["data"])
            elif isinstance(item["data"], float):
                line_lst.append("{:.1f}".format(item["data"]))
            elif item["data"] is None:
                line_lst.append("")
        file_handler.write(",".join(line_lst) + "\n")
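
    # NOTE: Every column's "data" string in this algorithm is a small command:
    #   "template <idx>"            - copy column <idx> of the CSV template,
    #   "data <job> <operation>"    - apply <operation> (e.g. mean) to the
    #                                 throughput samples collected from <job>,
    #   "operation <op> <i1> <i2>"  - apply <op> to two already computed
    #                                 columns of the row.
    # The concrete job names and operation names come from the specification
    # file; the examples in parentheses are assumptions, not fixed values.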

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the template
    file_name = table.get("template", None)
    if file_name:
        try:
            tmpl = _read_csv_template(file_name)
        except PresentationError:
            logging.error("  The template '{0}' does not exist. Skipping the "
                          "table.".format(file_name))
            return None
    else:
        logging.error("The template is not defined. Skipping the table.")
        return None

    # Transform the data
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append(column["title"])

    # Generate the data for the table according to the model in the table
    # specification
    tbl_lst = list()
    for tmpl_item in tmpl:
        tbl_item = list()
        for column in table["columns"]:
            cmd = column["data"].split(" ")[0]
            args = column["data"].split(" ")[1:]
            if cmd == "template":
                try:
                    val = float(tmpl_item[int(args[0])])
                except ValueError:
                    val = tmpl_item[int(args[0])]
                tbl_item.append({"data": val})
            elif cmd == "data":
                job = args[0]
                operation = args[1]
                data_lst = list()
                for build in data[job]:
                    try:
                        data_lst.append(float(build[tmpl_item[0]]
                                              ["throughput"]["value"]))
                    except (KeyError, TypeError):
                        pass  # No data for this test in this build
                if data_lst:
                    tbl_item.append({"data": (eval(operation)(data_lst)) /
                                             1000000})
                else:
                    tbl_item.append({"data": None})
            elif cmd == "operation":
                operation = args[0]
                try:
                    nr1 = float(tbl_item[int(args[1])]["data"])
                    nr2 = float(tbl_item[int(args[2])]["data"])
                    if nr1 and nr2:
                        tbl_item.append({"data": eval(operation)(nr1, nr2)})
                    else:
                        tbl_item.append({"data": None})
                except (IndexError, ValueError, TypeError):
                    logging.error("No data for {0}".format(tbl_item[0]["data"]))
                    tbl_item.append({"data": None})
            else:
                logging.error("Not supported command {0}. Skipping the table.".
                              format(cmd))
                return None
        tbl_lst.append(tbl_item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)

    # Create the tables and write them to the files
    file_names = [
        "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])
    ]

    for file_name in file_names:
        logging.info("    Writing the file '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in tbl_lst:
                if isinstance(item[-1]["data"], float):
                    rel_change = round(item[-1]["data"], 1)
                else:
                    rel_change = item[-1]["data"]
                if "ndr_top" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_top" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "ndr_low" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_low" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)

    logging.info("  Done.")
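

# NOTE: The 10.0 used above is a hard-coded threshold: tests whose relative
# improvement is at least 10 % go to the "top" tables, the remaining tests go
# to the "low" tables; it is not read from the specification file.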


def _read_csv_template(file_name):
    """Read the template from a .csv file.

    :param file_name: Name / full path / relative path of the file to read.
    :type file_name: str
    :returns: Data from the template as list (lines) of lists (items on line).
    :rtype: list
    :raises: PresentationError if it is not possible to read the file.
    """

    try:
        with open(file_name, 'r') as csv_file:
            tmpl_data = list()
            for line in csv_file:
                # Strip the trailing newline and split the line into items
                tmpl_data.append(line[:-1].split(","))
        return tmpl_data
    except IOError as err:
        raise PresentationError(str(err), level="ERROR")


def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} Throughput [Mpps]".format(item["title"]),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} Throughput [Mpps]".format(table["compare"]["title"]),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Change [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name,
                                          "ref-data": list(),
                                          "cmp-data": list()}
                try:
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["throughput"]["value"])
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                try:
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["throughput"]["value"])
                except KeyError:
                    pass
                except TypeError:
                    tbl_dict.pop(tst_name, None)

    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        if tbl_dict.get(tst_name, None) is None:
                            continue
                        if tbl_dict[tst_name].get("history", None) is None:
                            tbl_dict[tst_name]["history"] = OrderedDict()
                        if tbl_dict[tst_name]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name]["history"][item["title"]] = \
                                list()
                        tbl_dict[tst_name]["history"][item["title"]].\
                            append(tst_data["throughput"]["value"])

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if tbl_dict[tst_name].get("history", None) is not None:
            for hist_list in tbl_dict[tst_name]["history"].values():
                for hist_data in hist_list:
                    if hist_data:
                        data_t = remove_outliers(
                            hist_data, outlier_const=table["outlier-const"])
                        # TODO: Specify window size.
                        item.append(round(mean(data_t) / 1000000, 2))
                        item.append(round(stdev(data_t) / 1000000, 2))
                    else:
                        item.extend([None, None])
        else:
            item.extend([None, None])
        if tbl_dict[tst_name]["ref-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        if tbl_dict[tst_name]["cmp-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        if item[-5] is not None and item[-3] is not None and item[-5] != 0:
            item.append(int(relative_change(float(item[-5]), float(item[-3]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
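
    # The throughput samples are stored in packets per second; dividing the
    # means and standard deviations by 1000000 above converts them to the Mpps
    # units announced in the header. Outliers are removed from every sample
    # list first, using the "outlier-const" factor from the specification.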

    # Generate tables:
    # All tests in csv:
    tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
                 ]
    for file_name in tbl_names:
        logging.info("    Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            for test in tbl_lst:
                if (file_name.split("-")[-3] in test[0] and  # NDR vs PDR
                        file_name.split("-")[-2] in test[0]):  # cores
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item) for item in test]) +
                                       "\n")

    # All tests in txt:
    tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
                     "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-pdr-4t4c-full.txt".format(table["output-file"])
                     ]

    for i, txt_name in enumerate(tbl_names_txt):
        txt_table = None
        logging.info("    Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                else:
                    txt_table.add_row(row)
            txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))

    # Selected tests in csv:
    input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
        lines = list()
        for line in in_file:
            lines.append(line)

    output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info("    Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info("    Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
        lines = list()
        for line in in_file:
            lines.append(line)

    output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info("    Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info("    Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)


def table_performance_comparison_mrr(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison_mrr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case",
                  "{0} Throughput [Mpps]".format(table["reference"]["title"]),
                  "{0} stdev [Mpps]".format(table["reference"]["title"]),
                  "{0} Throughput [Mpps]".format(table["compare"]["title"]),
                  "{0} stdev [Mpps]".format(table["compare"]["title"]),
                  "Change [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name,
                                          "ref-data": list(),
                                          "cmp-data": list()}
                try:
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["result"]["throughput"])
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                try:
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["result"]["throughput"])
                except KeyError:
                    pass
                except TypeError:
                    tbl_dict.pop(tst_name, None)

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if tbl_dict[tst_name]["ref-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        if tbl_dict[tst_name]["cmp-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        if item[1] is not None and item[3] is not None and item[1] != 0:
            item.append(int(relative_change(float(item[1]), float(item[3]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate tables:
    # All tests in csv:
    tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-2t2c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-4t4c-full{1}".format(table["output-file"],
                                           table["output-file-ext"])
                 ]
    for file_name in tbl_names:
        logging.info("    Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            for test in tbl_lst:
                if file_name.split("-")[-2] in test[0]:  # cores
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item) for item in test]) +
                                       "\n")

    # All tests in txt:
    tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
                     "{0}-2t2c-full.txt".format(table["output-file"]),
                     "{0}-4t4c-full.txt".format(table["output-file"])
                     ]

    for i, txt_name in enumerate(tbl_names_txt):
        txt_table = None
        logging.info("    Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                else:
                    txt_table.add_row(row)
            txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))
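

# NOTE: The MRR variant above differs from table_performance_comparison mainly
# in its data source: it reads tst_data["result"]["throughput"] (the maximum
# receive rate result) instead of tst_data["throughput"]["value"], and it
# writes per-core tables without the ndr/pdr split.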


def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]",
              "Outliers [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name,
                                          "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["throughput"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        if len(tbl_dict[tst_name]["data"]) > 2:

            pd_data = pd.Series(tbl_dict[tst_name]["data"])
            last_key = pd_data.keys()[-1]
            win_size = min(pd_data.size, table["window"])
            win_first_idx = pd_data.size - win_size
            key_14 = pd_data.keys()[win_first_idx]
            long_win_size = min(pd_data.size, table["long-trend-window"])

            data_t, _ = split_outliers(pd_data, outlier_const=1.5,
                                       window=win_size)

            median_t = data_t.rolling(window=win_size, min_periods=2).median()
            stdev_t = data_t.rolling(window=win_size, min_periods=2).std()
            median_first_idx = pd_data.size - long_win_size
            try:
                max_median = max([x for x in median_t.values[median_first_idx:]
                                  if not isnan(x)])
            except ValueError:
                max_median = nan
            try:
                last_median_t = median_t[last_key]
            except KeyError:
                last_median_t = nan
            try:
                median_t_14 = median_t[key_14]
            except KeyError:
                median_t_14 = nan

            name = tbl_dict[tst_name]["name"]

            logging.info("{}".format(name))
            logging.info("pd_data : {}".format(pd_data))
            logging.info("data_t : {}".format(data_t))
            logging.info("median_t : {}".format(median_t))
            logging.info("last_median_t : {}".format(last_median_t))
            logging.info("median_t_14 : {}".format(median_t_14))
            logging.info("max_median : {}".format(max_median))
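
            # Classify each sample against the rolling median and standard
            # deviation of its window: values outside median +/- 3*stdev are
            # regressions or progressions, NaN samples are counted as
            # outliers, everything else is "normal".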

            # Classification list:
            classification_lst = list()
            for build_nr, value in pd_data.iteritems():
                if isnan(data_t[build_nr]) \
                        or isnan(median_t[build_nr]) \
                        or isnan(stdev_t[build_nr]) \
                        or isnan(value):
                    classification_lst.append("outlier")
                elif value < (median_t[build_nr] - 3 * stdev_t[build_nr]):
                    classification_lst.append("regression")
                elif value > (median_t[build_nr] + 3 * stdev_t[build_nr]):
                    classification_lst.append("progression")
                else:
                    classification_lst.append("normal")

            if isnan(last_median_t) or isnan(median_t_14) or median_t_14 == 0.0:
                rel_change_last = nan
            else:
                rel_change_last = round(
                    ((last_median_t - median_t_14) / median_t_14) * 100, 2)

            if isnan(max_median) or isnan(last_median_t) or max_median == 0.0:
                rel_change_long = nan
            else:
                rel_change_long = round(
                    ((last_median_t - max_median) / max_median) * 100, 2)

            logging.info("rel_change_last : {}".format(rel_change_last))
            logging.info("rel_change_long : {}".format(rel_change_long))

            tbl_lst.append(
                [name,
                 '-' if isnan(last_median_t) else
                 round(last_median_t / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
                 classification_lst[win_first_idx:].count("regression"),
                 classification_lst[win_first_idx:].count("progression"),
                 classification_lst[win_first_idx:].count("outlier")])

    tbl_lst.sort(key=lambda rel: rel[0])

    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_pro = [item for item in tbl_reg if item[5] == nrp]
            for nro in range(table["window"], -1, -1):
                tbl_out = [item for item in tbl_pro if item[6] == nro]
                tbl_sorted.extend(tbl_out)
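
    # The dashboard rows are ordered by the number of regressions, then
    # progressions, then outliers seen in the evaluation window, all in
    # descending order.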

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    txt_table = None
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    with open(file_name, 'rb') as csv_file:
        csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
        for row in csv_content:
            if txt_table is None:
                txt_table = prettytable.PrettyTable(row)
            else:
                txt_table.add_row(row)
        txt_table.align["Test case"] = "l"
    with open(txt_file_name, "w") as txt_file:
        txt_file.write(str(txt_table))


def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows:
    for r_idx, row in enumerate(csv_lst[1:]):
        background = "#D4E4F7" if r_idx % 2 else "white"
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name:
            if c_idx == 0:
                url = "../trending/"
                file_name = ""
                anchor = "#"
                feature = ""
                if "memif" in item:
                    file_name = "container_memif.html"
                elif "vhost" in item:
                    if "l2xcbase" in item or "l2bdbasemaclrn" in item:
                        file_name = "vm_vhost_l2.html"
                    elif "ip4base" in item:
                        file_name = "vm_vhost_ip4.html"
                elif "ipsec" in item:
                    file_name = "ipsec.html"
                elif "ethip4lispip" in item or "ethip4vxlan" in item:
                    file_name = "ip4_tunnels.html"
                elif "ip4base" in item or "ip4scale" in item:
                    file_name = "ip4.html"
                    if "iacl" in item or "snat" in item or "cop" in item:
                        feature = "-features"
                elif "ip6base" in item or "ip6scale" in item:
                    file_name = "ip6.html"
                elif "l2xcbase" in item or "l2xcscale" in item \
                        or "l2bdbasemaclrn" in item or "l2bdscale" in item \
                        or "l2dbbasemaclrn" in item or "l2dbscale" in item:
                    file_name = "l2.html"
                    if "iacl" in item:
                        feature = "-features"

                if "x520" in item:
                    anchor += "x520-"
                elif "x710" in item:
                    anchor += "x710-"
                elif "xl710" in item:
                    anchor += "xl710-"

                if "64b" in item:
                    anchor += "64b-"
                elif "imix" in item:
                    anchor += "imix-"
                elif "9000b" in item:
                    anchor += "9000b-"

                if "1t1c" in item:
                    anchor += "1t1c"
                elif "2t2c" in item:
                    anchor += "2t2c"
                elif "4t4c" in item:
                    anchor += "4t4c"

                url = url + file_name + anchor + feature
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item

    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".
                         format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
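

# NOTE: Each test name in the first column becomes a link into the trending
# documentation: the target page is chosen from the test type and the anchor
# is assembled from the NIC, frame size and core count substrings found in
# the test name, with an optional "-features" suffix for feature tests.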