# Copyright (c) 2017 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14 """Algorithms to generate tables.
import logging
import csv

import prettytable
import pandas as pd

from string import replace
from math import isnan
from xml.etree import ElementTree as ET

from errors import PresentationError
from utils import mean, stdev, relative_change, remove_outliers, split_outliers


def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """
    logging.info("Generating the tables ...")
    for table in spec.tables:
        try:
            # Dispatch to the generator function named in the specification.
            eval(table["algorithm"])(table, data)
        except NameError:
            logging.error("The algorithm '{0}' is not defined.".
                          format(table["algorithm"]))
    logging.info("Done.")
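
# A minimal sketch of one "tables" entry in the specification YAML, for
# orientation only. The keys shown ("algorithm", "title", "output-file",
# "output-file-ext", "columns", "data") are the ones read by the code in this
# module; all values, the job name and the path are invented:
#
#   - title: "Detailed Test Results"
#     algorithm: "table_details"        # must name a function in this module
#     output-file: "{DIR[WORKING]}/detailed_results"
#     output-file-ext: ".csv"
#     columns:
#       - title: "Name"
#         data: "data name"
#     data:
#       "csit-vpp-perf-1710-all": [100]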


def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error(" No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.iteritems():
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # The column model is "data <key>"; the token after
                        # the space selects a field of the test record.
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info(" Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info(" Done.")
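
# Note on the column model used by table_details and table_merged_details:
# each column's "data" entry is a two-token string "data <key>", where <key>
# selects a field of the test record. "vat-history" and "show-run" appear in
# the code above; the other key shown here is an assumed example:
#
#   columns:
#     - title: "Name"
#       data: "data name"
#     - title: "VAT History"
#       data: "data vat-history"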


def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.iteritems():
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info(" Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info(" Done.")


def table_performance_improvements(table, input_data):
    """Generate the table(s) with algorithm: table_performance_improvements
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    def _write_line_to_file(file_handler, data):
        """Write a line to the .csv file.

        :param file_handler: File handler for the csv file. It must be open
            for writing text.
        :param data: Item to be written to the file.
        :type file_handler: BinaryIO
        :type data: list
        """

        line_lst = list()
        for item in data:
            if isinstance(item["data"], str):
                # Remove the "-ndrdisc" / "-pdrdisc" suffix from the end
                if item["data"].endswith("drdisc"):
                    item["data"] = item["data"][:-8]
                line_lst.append(item["data"])
            elif isinstance(item["data"], float):
                line_lst.append("{:.1f}".format(item["data"]))
            elif item["data"] is None:
                line_lst.append("")
        file_handler.write(",".join(line_lst) + "\n")

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the template
    file_name = table.get("template", None)
    if file_name:
        try:
            tmpl = _read_csv_template(file_name)
        except PresentationError:
            logging.error(" The template '{0}' does not exist. Skipping the "
                          "table.".format(file_name))
            return None
    else:
        logging.error("The template is not defined. Skipping the table.")
        return None

    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append(column["title"])

    # Generate the data for the table according to the model in the table
    # specification
    tbl_lst = list()
    for tmpl_item in tmpl:
        tbl_item = list()
        for column in table["columns"]:
            cmd = column["data"].split(" ")[0]
            args = column["data"].split(" ")[1:]
            if cmd == "template":
                try:
                    val = float(tmpl_item[int(args[0])])
                except ValueError:
                    val = tmpl_item[int(args[0])]
                tbl_item.append({"data": val})
            elif cmd == "data":
                jobs = args[0:-1]
                operation = args[-1]
                data_lst = list()
                for job in jobs:
                    for build in data[job]:
                        try:
                            data_lst.append(float(build[tmpl_item[0]]
                                                  ["throughput"]["value"]))
                        except (KeyError, TypeError):
                            # No data in this build for this test
                            pass
                if data_lst:
                    tbl_item.append({"data": (eval(operation)(data_lst)) /
                                             1000000})
                else:
                    tbl_item.append({"data": None})
            elif cmd == "operation":
                operation = args[0]
                try:
                    nr1 = float(tbl_item[int(args[1])]["data"])
                    nr2 = float(tbl_item[int(args[2])]["data"])
                    if nr1 and nr2:
                        tbl_item.append({"data": eval(operation)(nr1, nr2)})
                    else:
                        tbl_item.append({"data": None})
                except (IndexError, ValueError, TypeError):
                    logging.error("No data for {0}".format(tbl_item[0]["data"]))
                    tbl_item.append({"data": None})
            else:
                logging.error("Not supported command {0}. Skipping the table.".
                              format(cmd))
                return None
        tbl_lst.append(tbl_item)
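
    # Column models handled above, illustrated (the job name is hypothetical):
    #   "template 1"                        -- copy item 1 of the template
    #                                          line into this column,
    #   "data csit-vpp-perf-1710-all mean"  -- reduce the throughput of this
    #                                          test over all builds of the job
    #                                          with mean(), scaled to Mpps,
    #   "operation relative_change 1 2"     -- apply relative_change() to the
    #                                          values already placed in
    #                                          columns 1 and 2 of this row.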

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)

    # Create the tables and write them to the files
    file_names = [
        "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])
    ]

    for file_name in file_names:
        logging.info(" Writing the file '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in tbl_lst:
                if isinstance(item[-1]["data"], float):
                    rel_change = round(item[-1]["data"], 1)
                else:
                    rel_change = item[-1]["data"]
                if "ndr_top" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_top" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "ndr_low" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_low" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)

    logging.info(" Done.")


def _read_csv_template(file_name):
    """Read the template from a .csv file.

    :param file_name: Name / full path / relative path of the file to read.
    :type file_name: str
    :returns: Data from the template as list (lines) of lists (items on line).
    :rtype: list
    :raises: PresentationError if it is not possible to read the file.
    """

    try:
        with open(file_name, 'r') as csv_file:
            tmpl_data = list()
            for line in csv_file:
                tmpl_data.append(line[:-1].split(","))
        return tmpl_data
    except IOError as err:
        raise PresentationError(str(err), level="ERROR")
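
# For orientation: a template line such as
# "tc01-64b-1t1c-eth-l2xcbase-ndrdisc,12.3,11.8" (an invented example) is
# returned as ["tc01-64b-1t1c-eth-l2xcbase-ndrdisc", "12.3", "11.8"] -- one
# list of strings per line of the file.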


def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case",
                  "{0} Throughput [Mpps]".format(table["reference"]["title"]),
                  "{0} stdev [Mpps]".format(table["reference"]["title"]),
                  "{0} Throughput [Mpps]".format(table["compare"]["title"]),
                  "{0} stdev [Mpps]".format(table["compare"]["title"]),
                  "Change [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name,
                                          "ref-data": list(),
                                          "cmp-data": list()}
                try:
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["throughput"]["value"])
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                try:
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["throughput"]["value"])
                except KeyError:
                    pass
                except TypeError:
                    tbl_dict.pop(tst_name, None)

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if tbl_dict[tst_name]["ref-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        if tbl_dict[tst_name]["cmp-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        if item[1] is not None and item[3] is not None:
            item.append(int(relative_change(float(item[1]), float(item[3]))))
            tbl_lst.append(item)
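
    # Worked example with invented numbers: for a reference mean of 10.00 Mpps
    # (item[1]) and a compare mean of 11.50 Mpps (item[3]),
    # relative_change(10.0, 11.5) yields 15.0 (assuming utils.relative_change
    # computes (nr2 - nr1) / nr1 * 100), so the row ends with 15 in the
    # "Change [%]" column.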

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate tables:
    # All tests in csv:
    tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
                 ]
    for file_name in tbl_names:
        logging.info(" Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            for test in tbl_lst:
                if (file_name.split("-")[-3] in test[0] and  # NDR vs PDR
                        file_name.split("-")[-2] in test[0]):  # cores
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item) for item in test]) +
                                       "\n")

    # All tests in txt:
    tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
                     "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-pdr-4t4c-full.txt".format(table["output-file"])
                     ]

    for i, txt_name in enumerate(tbl_names_txt):
        txt_table = None
        logging.info(" Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                else:
                    txt_table.add_row(row)
            txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))
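
    # For orientation, prettytable renders each csv table as an ASCII grid,
    # roughly like this (values invented):
    #
    #   +--------------+-----------------------+-----------+-----+
    #   | Test case    | ... Throughput [Mpps] | ... stdev | ... |
    #   +--------------+-----------------------+-----------+-----+
    #   | eth-l2xcbase | 10.0                  | 0.02      | ... |
    #   +--------------+-----------------------+-----------+-----+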

    # Selected tests in csv:
    input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
        lines = list()
        for line in in_file:
            lines.append(line)

    output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info(" Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info(" Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
        lines = list()
        for line in in_file:
            lines.append(line)

    output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info(" Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info(" Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    logging.info(" Done.")


def table_performance_comparison_mrr(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison_mrr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case",
                  "{0} Throughput [Mpps]".format(table["reference"]["title"]),
                  "{0} stdev [Mpps]".format(table["reference"]["title"]),
                  "{0} Throughput [Mpps]".format(table["compare"]["title"]),
                  "{0} stdev [Mpps]".format(table["compare"]["title"]),
                  "Change [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name,
                                          "ref-data": list(),
                                          "cmp-data": list()}
                try:
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["result"]["throughput"])
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                try:
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["result"]["throughput"])
                except KeyError:
                    pass
                except TypeError:
                    tbl_dict.pop(tst_name, None)

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if tbl_dict[tst_name]["ref-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        if tbl_dict[tst_name]["cmp-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        if item[1] is not None and item[3] is not None and item[1] != 0:
            item.append(int(relative_change(float(item[1]), float(item[3]))))
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate tables:
    # All tests in csv:
    tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-2t2c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-4t4c-full{1}".format(table["output-file"],
                                           table["output-file-ext"])
                 ]
    for file_name in tbl_names:
        logging.info(" Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            for test in tbl_lst:
                if file_name.split("-")[-2] in test[0]:  # cores
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item) for item in test]) +
                                       "\n")

    # All tests in txt:
    tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
                     "{0}-2t2c-full.txt".format(table["output-file"]),
                     "{0}-4t4c-full.txt".format(table["output-file"])
                     ]

    for i, txt_name in enumerate(tbl_names_txt):
        txt_table = None
        logging.info(" Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                else:
                    txt_table.add_row(row)
            txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))


def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Throughput Trend [Mpps]",
              "Long Trend Compliance",
              "Trend Compliance",
              "Top Anomaly [Mpps]",
              "Change [%]",
              "Outliers [Number]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name,
                                          "data": dict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["throughput"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        if len(tbl_dict[tst_name]["data"]) > 2:

            pd_data = pd.Series(tbl_dict[tst_name]["data"])
            win_size = min(pd_data.size, table["window"])
            # Test name:
            name = tbl_dict[tst_name]["name"]

            median = pd_data.rolling(window=win_size, min_periods=2).median()
            median_idx = pd_data.size - table["long-trend-window"]
            median_idx = 0 if median_idx < 0 else median_idx
            max_median = max(median.values[median_idx:])
            trimmed_data, _ = split_outliers(pd_data, outlier_const=1.5,
                                             window=win_size)
            stdev_t = pd_data.rolling(window=win_size, min_periods=2).std()

            rel_change_lst = [None, ]
            classification_lst = [None, ]
            median_lst = [None, ]
            sample_lst = [None, ]
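
            # For orientation (invented numbers): with win_size=3,
            # pd.Series([10.0, 11.0, 9.0, 10.5]).rolling(window=3,
            # min_periods=2).median() evaluates to [NaN, 10.5, 10.0, 10.5];
            # each sample below is compared against this rolling median and
            # the corresponding rolling standard deviation.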
            first = True
            for build_nr, value in pd_data.iteritems():
                if first:
                    first = False
                    continue

                # Relative changes list:
                if not isnan(value) \
                        and not isnan(median[build_nr]) \
                        and median[build_nr] != 0:
                    rel_change_lst.append(round(
                        relative_change(float(median[build_nr]), float(value)),
                        2))
                else:
                    rel_change_lst.append(None)

                # Classification list:
                if isnan(trimmed_data[build_nr]) \
                        or isnan(median[build_nr]) \
                        or isnan(stdev_t[build_nr]) \
                        or isnan(value):
                    classification_lst.append("outlier")
                elif value < (median[build_nr] - 3 * stdev_t[build_nr]):
                    classification_lst.append("regression")
                elif value > (median[build_nr] + 3 * stdev_t[build_nr]):
                    classification_lst.append("progression")
                else:
                    classification_lst.append("normal")
                sample_lst.append(value)
                median_lst.append(median[build_nr])

            last_idx = len(classification_lst) - 1
            first_idx = last_idx - int(table["evaluated-window"])
            first_idx = 0 if first_idx < 0 else first_idx

            # Classification of the evaluated window; three consecutive
            # outliers mean a failure:
            nr_outliers = 0
            consecutive_outliers = 0
            failure = False
            for item in classification_lst[first_idx:]:
                if item == "outlier":
                    nr_outliers += 1
                    consecutive_outliers += 1
                    if consecutive_outliers == 3:
                        failure = True
                else:
                    consecutive_outliers = 0

            if failure:
                classification = "failure"
            elif "regression" in classification_lst[first_idx:]:
                classification = "regression"
            elif "progression" in classification_lst[first_idx:]:
                classification = "progression"
            else:
                classification = "normal"
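
            # Worked example with invented numbers: for a rolling median of
            # 10.0 Mpps and a rolling stdev of 0.2 Mpps, a sample below
            # 10.0 - 3 * 0.2 = 9.4 Mpps is a "regression" and a sample above
            # 10.0 + 3 * 0.2 = 10.6 Mpps is a "progression"; three consecutive
            # outliers in the evaluated window turn the whole test into a
            # "failure".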

            # Find the sample to report: the anomaly with the largest
            # absolute relative change in the evaluated window.
            if classification == "normal":
                index = len(classification_lst) - 1
            else:
                tmp_classification = "outlier" if classification == "failure" \
                    else classification
                index = first_idx
                for idx in range(first_idx, len(classification_lst)):
                    if classification_lst[idx] == tmp_classification:
                        if rel_change_lst[idx]:
                            index = idx
                            break
                for idx in range(index+1, len(classification_lst)):
                    if classification_lst[idx] == tmp_classification:
                        if rel_change_lst[idx]:
                            if (abs(rel_change_lst[idx]) >
                                    abs(rel_change_lst[index])):
                                index = idx

            logging.info("{}".format(name))
            logging.info("sample_lst: {} - {}".
                         format(len(sample_lst), sample_lst))
            logging.info("median_lst: {} - {}".
                         format(len(median_lst), median_lst))
            logging.info("rel_change: {} - {}".
                         format(len(rel_change_lst), rel_change_lst))
            logging.info("classn_lst: {} - {}".
                         format(len(classification_lst), classification_lst))
            logging.info("index: {}".format(index))
            logging.info("classifica: {}".format(classification))

            try:
                trend = round(float(median_lst[-1]) / 1000000, 2) \
                    if not isnan(median_lst[-1]) else '-'
                sample = round(float(sample_lst[index]) / 1000000, 2) \
                    if not isnan(sample_lst[index]) else '-'
                rel_change = rel_change_lst[index] \
                    if rel_change_lst[index] is not None else '-'
                if not isnan(max_median):
                    if not isnan(sample_lst[index]):
                        long_trend_threshold = max_median * \
                            (table["long-trend-threshold"] / 100)
                        if sample_lst[index] < long_trend_threshold:
                            long_trend_classification = "failure"
                        else:
                            long_trend_classification = '-'
                    else:
                        long_trend_classification = "failure"
                else:
                    long_trend_classification = '-'
                tbl_lst.append([name,
                                trend,
                                long_trend_classification,
                                classification,
                                '-' if classification == "normal" else sample,
                                '-' if classification == "normal"
                                else rel_change,
                                nr_outliers])
            except IndexError as err:
                logging.error("{}".format(err))
                continue

    # Sort the table according to the classification
    tbl_sorted = list()
    for long_trend_class in ("failure", '-'):
        tbl_long = [item for item in tbl_lst if item[2] == long_trend_class]
        for classification in \
                ("failure", "regression", "progression", "normal"):
            tbl_tmp = [item for item in tbl_long if item[3] == classification]
            tbl_tmp.sort(key=lambda rel: rel[0])
            tbl_sorted.extend(tbl_tmp)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info(" Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    txt_table = None
    logging.info(" Writing file: '{0}'".format(txt_file_name))
    with open(file_name, 'rb') as csv_file:
        csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
        for row in csv_content:
            if txt_table is None:
                txt_table = prettytable.PrettyTable(row)
            else:
                txt_table.add_row(row)
        # The align key must match the header column name "Test Case":
        txt_table.align["Test Case"] = "l"
    with open(txt_file_name, "w") as txt_file:
        txt_file.write(str(txt_table))


def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows:
    for r_idx, row in enumerate(csv_lst[1:]):
        background = "#D4E4F7" if r_idx % 2 else "white"
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Test name: link it to the relevant trending graph.
            if c_idx == 0:
                url = "../trending/"
                file_name = ""
                anchor = "#"
                feature = ""
                if "memif" in item:
                    file_name = "container_memif.html"

                elif "vhost" in item:
                    if "l2xcbase" in item or "l2bdbasemaclrn" in item:
                        file_name = "vm_vhost_l2.html"
                    elif "ip4base" in item:
                        file_name = "vm_vhost_ip4.html"

                elif "ipsec" in item:
                    file_name = "ipsec.html"

                elif "ethip4lispip" in item or "ethip4vxlan" in item:
                    file_name = "ip4_tunnels.html"

                elif "ip4base" in item or "ip4scale" in item:
                    file_name = "ip4.html"
                    if "iacl" in item or "snat" in item or "cop" in item:
                        feature = "-features"

                elif "ip6base" in item or "ip6scale" in item:
                    file_name = "ip6.html"

                elif "l2xcbase" in item or "l2xcscale" in item \
                        or "l2bdbasemaclrn" in item or "l2bdscale" in item \
                        or "l2dbbasemaclrn" in item or "l2dbscale" in item:
                    file_name = "l2.html"
                    if "macip" in item:
                        feature = "-features"

                if "x520" in item:
                    anchor += "x520-"
                elif "x710" in item:
                    anchor += "x710-"
                elif "xl710" in item:
                    anchor += "xl710-"

                if "64b" in item:
                    anchor += "64b-"
                elif "78b" in item:
                    anchor += "78b-"
                elif "imix" in item:
                    anchor += "imix-"
                elif "9000b" in item:
                    anchor += "9000b-"
                elif "1518b" in item:
                    anchor += "1518b-"
                elif "114b" in item:
                    anchor += "114b-"

                if "1t1c" in item:
                    anchor += "1t1c"
                elif "2t2c" in item:
                    anchor += "2t2c"
                elif "4t4c" in item:
                    anchor += "4t4c"

                url = url + file_name + anchor + feature

                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item

            # Color-code the anomaly classification cells:
            if item == "regression":
                td.set("bgcolor", "#eca1a6")
            elif item == "failure":
                td.set("bgcolor", "#d6cbd3")
            elif item == "progression":
                td.set("bgcolor", "#bdcebe")
            if c_idx > 0:
                td.text = item

    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info(" Writing file: '{0}'".
                         format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
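
# For orientation, the file written above is reStructuredText embedding one
# HTML table, roughly (content abbreviated):
#
#   .. raw:: html
#
#       <table width="100%" border="0"><tr bgcolor="#7eade7">...</tr>...
#       </table>
#       <p><br><br></p>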