# Copyright (c) 2017 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Algorithms to generate tables.
"""


import logging
import csv
import prettytable
import pandas as pd

from string import replace
from math import isnan
from numpy import nan
from collections import OrderedDict
from xml.etree import ElementTree as ET

from errors import PresentationError
from utils import mean, stdev, relative_change, remove_outliers, split_outliers


def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
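        # Each table's "algorithm" entry names one of the table_* functions
        # in this module; eval() resolves that name to the callable to run.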
        try:
            eval(table["algorithm"])(table, data)
        except NameError:
            logging.error("The algorithm '{0}' is not defined.".
                          format(table["algorithm"]))


def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
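    # Only the first build of the first job listed in the table's "data"
    # section is used for this table.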
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error(" No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.iteritems():
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
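                    # The column's "data" entry is a space-separated
                    # specification; its second token names the field to read
                    # from the test data.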
                    try:
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info(" Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in table_lst:
                file_handler.write(",".join(item) + "\n")

    logging.info(" Done.")


def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
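    # merge_data() flattens the per-job / per-build structure into a single
    # mapping keyed by test name, so tests are addressed as data[test] below.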
    data.sort_index(inplace=True)

    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.iteritems():
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info(" Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in table_lst:
                file_handler.write(",".join(item) + "\n")

    logging.info(" Done.")


def table_performance_improvements(table, input_data):
    """Generate the table(s) with algorithm: table_performance_improvements
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    def _write_line_to_file(file_handler, data):
        """Write a line to the .csv file.

        :param file_handler: File handler for the csv file. It must be open
            for writing.
        :param data: Item to be written to the file.
        :type file_handler: BinaryIO
        :type data: list
        """

        line_lst = list()
        for item in data:
            if isinstance(item["data"], str):
                # Remove -?drdisc from the end
                if item["data"].endswith("drdisc"):
                    item["data"] = item["data"][:-8]
                line_lst.append(item["data"])
            elif isinstance(item["data"], float):
                line_lst.append("{:.1f}".format(item["data"]))
            elif item["data"] is None:
                line_lst.append("")
        file_handler.write(",".join(line_lst) + "\n")

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    file_name = table.get("template", None)
    if file_name:
        try:
            tmpl = _read_csv_template(file_name)
        except PresentationError:
            logging.error(" The template '{0}' does not exist. Skipping the "
                          "table.".format(file_name))
            return None
    else:
        logging.error("The template is not defined. Skipping the table.")
        return None

    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append(column["title"])

    # Generate the data for the table according to the model in the table
    # specification
    tbl_lst = list()
    for tmpl_item in tmpl:
        tbl_item = list()
        for column in table["columns"]:
            cmd = column["data"].split(" ")[0]
            args = column["data"].split(" ")[1:]
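            # The column's "data" string is a small command language:
            # "template <idx>" copies a value from the template row,
            # "data <job> <operation>" aggregates throughput values collected
            # for a job, and "operation <op> <idx1> <idx2>" combines two
            # already-computed columns.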
            if cmd == "template":
                try:
                    val = float(tmpl_item[int(args[0])])
                except ValueError:
                    val = tmpl_item[int(args[0])]
                tbl_item.append({"data": val})
            elif cmd == "data":
                job = args[0]
                operation = args[1]
                data_lst = list()
                for build in data[job]:
                    try:
                        data_lst.append(float(build[tmpl_item[0]]
                                              ["throughput"]["value"]))
                    except (KeyError, TypeError):
                        pass
                if data_lst:
                    tbl_item.append({"data": (eval(operation)(data_lst)) /
                                             1000000})
                else:
                    tbl_item.append({"data": None})
            elif cmd == "operation":
                operation = args[0]
                try:
                    nr1 = float(tbl_item[int(args[1])]["data"])
                    nr2 = float(tbl_item[int(args[2])]["data"])
                    if nr1 and nr2:
                        tbl_item.append({"data": eval(operation)(nr1, nr2)})
                    else:
                        tbl_item.append({"data": None})
                except (IndexError, ValueError, TypeError):
                    logging.error("No data for {0}".format(tbl_item[0]["data"]))
                    tbl_item.append({"data": None})
            else:
                logging.error("Unsupported command {0}. Skipping the table.".
                              format(cmd))
                return None
        tbl_lst.append(tbl_item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)

    # Create the tables and write them to the files
    file_names = [
        "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])
    ]

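    # Tests with a relative change of at least 10 % go to the "*_top" files,
    # the rest to the "*_low" files (NDR and PDR are split separately).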
    for file_name in file_names:
        logging.info(" Writing the file '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in tbl_lst:
                if isinstance(item[-1]["data"], float):
                    rel_change = round(item[-1]["data"], 1)
                else:
                    rel_change = item[-1]["data"]
                if "ndr_top" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_top" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "ndr_low" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_low" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)

    logging.info(" Done.")


def _read_csv_template(file_name):
    """Read the template from a .csv file.

    :param file_name: Name / full path / relative path of the file to read.
    :type file_name: str
    :returns: Data from the template as list (lines) of lists (items on line).
    :rtype: list
    :raises: PresentationError if it is not possible to read the file.
    """

    try:
        with open(file_name, 'r') as csv_file:
            tmpl_data = list()
            for line in csv_file:
                tmpl_data.append(line[:-1].split(","))
        return tmpl_data
    except IOError as err:
        raise PresentationError(str(err), level="ERROR")


def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} Throughput [Mpps]".format(item["title"]),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} Throughput [Mpps]".format(table["compare"]["title"]),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return None

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[:-1]))
                    tbl_dict[tst_name] = {"name": name,
                                          "ref-data": list(),
                                          "cmp-data": list()}
                try:
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["throughput"]["value"])
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                try:
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["throughput"]["value"])
                except KeyError:
                    pass
                except TypeError:
                    tbl_dict.pop(tst_name, None)

    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        if tbl_dict.get(tst_name, None) is None:
                            continue
                        if tbl_dict[tst_name].get("history", None) is None:
                            tbl_dict[tst_name]["history"] = OrderedDict()
                        if tbl_dict[tst_name]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name]["history"][item["title"]] = \
                                list()
                        try:
                            tbl_dict[tst_name]["history"][item["title"]].\
                                append(tst_data["throughput"]["value"])
                        except (TypeError, KeyError):
                            pass

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        data_t = remove_outliers(
                            hist_data, outlier_const=table["outlier-const"])
                        if data_t:
                            item.append(round(mean(data_t) / 1000000, 2))
                            item.append(round(stdev(data_t) / 1000000, 2))
                        else:
                            item.extend([None, None])
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        if tbl_dict[tst_name]["ref-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        if tbl_dict[tst_name]["cmp-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
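        # item[-4] and item[-2] now hold the reference and compare mean
        # throughput [Mpps]; their relative change [%] becomes the last column.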
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
                 ]
    for file_name in tbl_names:
        logging.info(" Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
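            # Keep only the rows matching this file's NDR/PDR type and core
            # count, and drop the core-count token from the test name.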
            for test in tbl_lst:
                if (file_name.split("-")[-3] in test[0] and  # NDR vs PDR
                        file_name.split("-")[-2] in test[0]):  # cores
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item) for item in test]) +
                                       "\n")

    tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
                     "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-pdr-4t4c-full.txt".format(table["output-file"])
                     ]
    for i, txt_name in enumerate(tbl_names_txt):
        txt_table = None
        logging.info(" Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                else:
                    txt_table.add_row(row)
        txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))

    # Selected tests in csv:
    input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
        lines = list()
        for line in in_file:
            lines.append(line)

    output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info(" Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info(" Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
        lines = list()
        for line in in_file:
            lines.append(line)

    output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info(" Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info(" Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)


def table_performance_comparison_mrr(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison_mrr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case",
                  "{0} Throughput [Mpps]".format(table["reference"]["title"]),
                  "{0} stdev [Mpps]".format(table["reference"]["title"]),
                  "{0} Throughput [Mpps]".format(table["compare"]["title"]),
                  "{0} stdev [Mpps]".format(table["compare"]["title"]),
                  "Delta [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return None

    # Prepare data to the table:
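    # For MRR the measured receive rate is stored under
    # tst_data["result"]["throughput"], unlike the NDR/PDR comparison above,
    # which reads tst_data["throughput"]["value"].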
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[:-1]))
                    tbl_dict[tst_name] = {"name": name,
                                          "ref-data": list(),
                                          "cmp-data": list()}
                try:
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["result"]["throughput"])
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                try:
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["result"]["throughput"])
                except KeyError:
                    pass
                except TypeError:
                    tbl_dict.pop(tst_name, None)

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if tbl_dict[tst_name]["ref-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        if tbl_dict[tst_name]["cmp-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        if item[1] is not None and item[3] is not None and item[1] != 0:
            item.append(int(relative_change(float(item[1]), float(item[3]))))
        if len(item) == 6:
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-2t2c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-4t4c-full{1}".format(table["output-file"],
                                           table["output-file-ext"])
                 ]
    for file_name in tbl_names:
        logging.info(" Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            for test in tbl_lst:
                if file_name.split("-")[-2] in test[0]:  # cores
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item) for item in test]) +
                                       "\n")

    tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
                     "{0}-2t2c-full.txt".format(table["output-file"]),
                     "{0}-4t4c-full.txt".format(table["output-file"])
                     ]
    for i, txt_name in enumerate(tbl_names_txt):
        txt_table = None
        logging.info(" Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                else:
                    txt_table.add_row(row)
        txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))


def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]",
              "Outliers [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[:-1]))
                    tbl_dict[tst_name] = {"name": name,
                                          "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["throughput"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        if len(tbl_dict[tst_name]["data"]) > 2:
            pd_data = pd.Series(tbl_dict[tst_name]["data"])
            last_key = pd_data.keys()[-1]
            win_size = min(pd_data.size, table["window"])
            win_first_idx = pd_data.size - win_size
            key_14 = pd_data.keys()[win_first_idx]
            long_win_size = min(pd_data.size, table["long-trend-window"])

            data_t, _ = split_outliers(pd_data, outlier_const=1.5,
                                       window=win_size)

            median_t = data_t.rolling(window=win_size, min_periods=2).median()
            stdev_t = data_t.rolling(window=win_size, min_periods=2).std()
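            # Each sample is later classified against the rolling median
            # +/- 3 * stdev computed over the same short-term window.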
            median_first_idx = pd_data.size - long_win_size
            try:
                max_median = max(
                    [x for x in median_t.values[median_first_idx:-win_size]
                     if not isnan(x)])
            except ValueError:
                max_median = nan
            try:
                last_median_t = median_t[last_key]
            except KeyError:
                last_median_t = nan
            try:
                median_t_14 = median_t[key_14]
            except KeyError:
                median_t_14 = nan

            name = tbl_dict[tst_name]["name"]

            # Classification list:
            classification_lst = list()
            for build_nr, value in pd_data.iteritems():
                if isnan(data_t[build_nr]) \
                        or isnan(median_t[build_nr]) \
                        or isnan(stdev_t[build_nr]) \
                        or isnan(value):
                    classification_lst.append("outlier")
                elif value < (median_t[build_nr] - 3 * stdev_t[build_nr]):
                    classification_lst.append("regression")
                elif value > (median_t[build_nr] + 3 * stdev_t[build_nr]):
                    classification_lst.append("progression")
                else:
                    classification_lst.append("normal")
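            # Short-term change compares the latest rolling median to the
            # median at the start of the short-term window; long-term change
            # compares it to the maximum rolling median over the earlier part
            # of the long-term window.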
            if isnan(last_median_t) or isnan(median_t_14) or median_t_14 == 0.0:
                rel_change_last = nan
            else:
                rel_change_last = round(
                    ((last_median_t - median_t_14) / median_t_14) * 100, 2)

            if isnan(max_median) or isnan(last_median_t) or max_median == 0.0:
                rel_change_long = nan
            else:
                rel_change_long = round(
                    ((last_median_t - max_median) / max_median) * 100, 2)

            logging.info("rel_change_last : {}".format(rel_change_last))
            logging.info("rel_change_long : {}".format(rel_change_long))

            tbl_lst.append(
                [name,
                 '-' if isnan(last_median_t) else
                 round(last_median_t / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
                 classification_lst[win_first_idx:].count("regression"),
                 classification_lst[win_first_idx:].count("progression"),
                 classification_lst[win_first_idx:].count("outlier")])

    tbl_lst.sort(key=lambda rel: rel[0])
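    # Final ordering: group rows by regression count (descending), then by
    # progression count, then by outlier count; each group is ordered by the
    # short-term change.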
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_pro = [item for item in tbl_reg if item[5] == nrp]
            for nro in range(table["window"], -1, -1):
                tbl_out = [item for item in tbl_pro if item[6] == nro]
                tbl_out.sort(key=lambda rel: rel[2])
                tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info(" Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    txt_table = None
    logging.info(" Writing file: '{0}'".format(txt_file_name))
    with open(file_name, 'rb') as csv_file:
        csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
        for row in csv_content:
            if txt_table is None:
                txt_table = prettytable.PrettyTable(row)
            else:
                txt_table.add_row(row)
    txt_table.align["Test case"] = "l"
    with open(txt_file_name, "w") as txt_file:
        txt_file.write(str(txt_table))


def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Unable to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "outlier": ("#e6e6e6", "#cccccc"),
              "normal": ("#e9f1fb", "#d4e4f7")}
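    # Each classification maps to a pair of shades; rows alternate between
    # the two shades of their classification's colour.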
    for r_idx, row in enumerate(csv_lst[1:]):
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        elif int(row[6]):
            color = "outlier"
        else:
            color = "normal"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
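            # For the test-name cell (the first column) the text becomes a
            # link to the matching trending page: file_name selects the page,
            # anchor and feature narrow it down to the right section.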
            if c_idx == 0:
                url = "../trending/"
                file_name = ""
                anchor = "#"
                feature = ""
                if "memif" in item:
                    file_name = "container_memif.html"
                elif "vhost" in item:
                    if "l2xcbase" in item or "l2bdbasemaclrn" in item:
                        file_name = "vm_vhost_l2.html"
                    elif "ip4base" in item:
                        file_name = "vm_vhost_ip4.html"
                elif "ipsec" in item:
                    file_name = "ipsec.html"
                elif "ethip4lispip" in item or "ethip4vxlan" in item:
                    file_name = "ip4_tunnels.html"
                elif "ip4base" in item or "ip4scale" in item:
                    file_name = "ip4.html"
                    if "iacl" in item or "snat" in item or "cop" in item:
                        feature = "-features"
                elif "ip6base" in item or "ip6scale" in item:
                    file_name = "ip6.html"
                elif "l2xcbase" in item or "l2xcscale" in item \
                        or "l2bdbasemaclrn" in item or "l2bdscale" in item \
                        or "l2dbbasemaclrn" in item or "l2dbscale" in item:
                    file_name = "l2.html"
                    if "iacl" in item:
                        feature = "-features"

                if "x520" in item:
                    anchor += "x520-"
                elif "xl710" in item:
                    anchor += "xl710-"

                if "64b" in item:
                    anchor += "64b-"
                elif "9000b" in item:
                    anchor += "9000b-"

                url = url + file_name + anchor + feature
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item

    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info(" Writing file: '{0}'".
                         format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")