1 # Copyright (c) 2017 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
23 from string import replace
24 from math import isnan
25 from collections import OrderedDict
27 from xml.etree import ElementTree as ET
29 from errors import PresentationError
30 from utils import mean, stdev, relative_change, remove_outliers, split_outliers
33 def generate_tables(spec, data):
34 """Generate all tables specified in the specification file.
36 :param spec: Specification read from the specification file.
37 :param data: Data to process.
38 :type spec: Specification
# NOTE(review): this excerpt is sampled -- the docstring closer, the
# ":type data:" line and the try/except wrapping the dispatch below are
# not visible here; do not assume their exact shape.
42 logging.info("Generating the tables ...")
43 for table in spec.tables:
# Dispatch: table["algorithm"] names one of the table_* generators in
# this module and is resolved by name.
# SECURITY NOTE(review): eval() on a spec-supplied string executes
# arbitrary code if the specification file is untrusted; a dict of
# callables (or getattr on this module) would be safer.
45 eval(table["algorithm"])(table, data)
# Error path (its except line is elided): the named algorithm did not
# resolve to a known generator.
47 logging.error("The algorithm '{0}' is not defined.".
48 format(table["algorithm"]))
52 def table_details(table, input_data):
53 """Generate the table(s) with algorithm: table_detailed_test_results
54 specified in the specification file.
56 :param table: Table to generate.
57 :param input_data: Data to process.
58 :type table: pandas.Series
59 :type input_data: InputData
# NOTE(review): excerpt is sampled -- the docstring closer and the
# initialisations of header / table_lst / row_lst are elided here.
62 logging.info(" Generating the table {0} ...".
63 format(table.get("title", "")))
66 data = input_data.filter_data(table)
68 # Prepare the header of the tables
70 for column in table["columns"]:
# CSV-escape each title by doubling embedded double quotes.
71 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
73 # Generate the data for the table according to the model in the table
# Only the first job and its first listed build are used as the source.
75 job = table["data"].keys()[0]
76 build = str(table["data"][job][0])
78 suites = input_data.suites(job, build)
80 logging.error(" No data available. The table will not be generated.")
# One output file per suite; rows are the tests whose "parent" field is
# contained in the suite name.
83 for suite_longname, suite in suites.iteritems():
85 suite_name = suite["name"]
87 for test in data[job][build].keys():
88 if data[job][build][test]["parent"] in suite_name:
90 for column in table["columns"]:
# column["data"] is a two-token spec ("<cmd> <field>");
# the second token selects the test-data field.
92 col_data = str(data[job][build][test][column["data"].
93 split(" ")[1]]).replace('"', '""')
94 if column["data"].split(" ")[1] in ("vat-history",
# rST post-processing: drop " |br| " markers and wrap the cell
# in |prein| / |preout| (handler lines partially elided).
96 col_data = replace(col_data, " |br| ", "",
98 col_data = " |prein| {0} |preout| ".\
100 row_lst.append('"{0}"'.format(col_data))
# Fallback cell when the requested field is missing for this test.
102 row_lst.append("No data")
103 table_lst.append(row_lst)
105 # Write the data to file
107 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
108 table["output-file-ext"])
109 logging.info(" Writing file: '{}'".format(file_name))
110 with open(file_name, "w") as file_handler:
111 file_handler.write(",".join(header) + "\n")
112 for item in table_lst:
113 file_handler.write(",".join(item) + "\n")
115 logging.info(" Done.")
118 def table_merged_details(table, input_data):
119 """Generate the table(s) with algorithm: table_merged_details
120 specified in the specification file.
122 :param table: Table to generate.
123 :param input_data: Data to process.
124 :type table: pandas.Series
125 :type input_data: InputData
# NOTE(review): excerpt is sampled -- docstring closer and the
# header / table_lst / row_lst initialisations are elided here.
126 logging.info(" Generating the table {0} ...".
129 format(table.get("title", "")) if False else None
# (The two lines above are the original log call; see L75-L76.)
132 data = input_data.filter_data(table)
# Unlike table_details, test data from all jobs/builds is merged into a
# single flat mapping before generating rows.
133 data = input_data.merge_data(data)
134 data.sort_index(inplace=True)
136 suites = input_data.filter_data(table, data_set="suites")
137 suites = input_data.merge_data(suites)
139 # Prepare the header of the tables
141 for column in table["columns"]:
# CSV-escape each title by doubling embedded double quotes.
142 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
# One output file per suite; rows are tests whose "parent" is contained
# in the suite name.
144 for _, suite in suites.iteritems():
146 suite_name = suite["name"]
148 for test in data.keys():
149 if data[test]["parent"] in suite_name:
151 for column in table["columns"]:
# Second token of column["data"] selects the test-data field.
153 col_data = str(data[test][column["data"].
154 split(" ")[1]]).replace('"', '""')
155 if column["data"].split(" ")[1] in ("vat-history",
# rST post-processing: drop " |br| " markers, trim the trailing
# 5 characters (presumably a dangling " |br|" -- confirm) and
# wrap in |prein| / |preout|.
157 col_data = replace(col_data, " |br| ", "",
159 col_data = " |prein| {0} |preout| ".\
160 format(col_data[:-5])
161 row_lst.append('"{0}"'.format(col_data))
# Fallback cell when the requested field is missing for this test.
163 row_lst.append("No data")
164 table_lst.append(row_lst)
166 # Write the data to file
168 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
169 table["output-file-ext"])
170 logging.info(" Writing file: '{}'".format(file_name))
171 with open(file_name, "w") as file_handler:
172 file_handler.write(",".join(header) + "\n")
173 for item in table_lst:
174 file_handler.write(",".join(item) + "\n")
176 logging.info(" Done.")
179 def table_performance_improvements(table, input_data):
180 """Generate the table(s) with algorithm: table_performance_improvements
181 specified in the specification file.
183 :param table: Table to generate.
184 :param input_data: Data to process.
185 :type table: pandas.Series
186 :type input_data: InputData
# NOTE(review): excerpt is sampled -- several initialisations and the
# docstring closers are elided throughout this function.
189 def _write_line_to_file(file_handler, data):
190 """Write a line to the .csv file.
192 :param file_handler: File handler for the csv file. It must be open for
194 :param data: Item to be written to the file.
195 :type file_handler: BinaryIO
# Serialise one table row: each item is a {"data": value} dict whose
# value may be str, float or None.
201 if isinstance(item["data"], str):
202 # Remove -?drdisc from the end
203 if item["data"].endswith("drdisc"):
# NOTE(review): slicing [:-8] with an endswith("drdisc") guard
# removes 8 chars for a 6-char suffix -- presumably to also drop
# a "-?" prefix of the suffix; confirm against the template data.
204 item["data"] = item["data"][:-8]
205 line_lst.append(item["data"])
206 elif isinstance(item["data"], float):
207 line_lst.append("{:.1f}".format(item["data"]))
208 elif item["data"] is None:
210 file_handler.write(",".join(line_lst) + "\n")
212 logging.info(" Generating the table {0} ...".
213 format(table.get("title", "")))
# The table is seeded from a .csv template; missing template => skip.
216 file_name = table.get("template", None)
219 tmpl = _read_csv_template(file_name)
220 except PresentationError:
221 logging.error(" The template '{0}' does not exist. Skipping the "
222 "table.".format(file_name))
225 logging.error("The template is not defined. Skipping the table.")
229 data = input_data.filter_data(table)
231 # Prepare the header of the tables
233 for column in table["columns"]:
234 header.append(column["title"])
236 # Generate the data for the table according to the model in the table
# Each column spec is "<cmd> <args...>"; supported cmds seen below are
# "template", "data" (elided) and "operation".
239 for tmpl_item in tmpl:
241 for column in table["columns"]:
242 cmd = column["data"].split(" ")[0]
243 args = column["data"].split(" ")[1:]
244 if cmd == "template":
# Copy a template column, as float when it parses as one.
246 val = float(tmpl_item[int(args[0])])
248 val = tmpl_item[int(args[0])]
249 tbl_item.append({"data": val})
# "data" cmd (its branch header is elided): collect throughput
# values for this test across the selected builds.
255 for build in data[job]:
257 data_lst.append(float(build[tmpl_item[0]]
258 ["throughput"]["value"]))
259 except (KeyError, TypeError):
# Aggregate with the spec-named operation (e.g. mean);
# NOTE(review): eval() on a spec string -- same security caveat
# as in generate_tables.
263 tbl_item.append({"data": (eval(operation)(data_lst)) /
266 tbl_item.append({"data": None})
267 elif cmd == "operation":
# Apply a named binary operation (e.g. relative_change) to two
# previously computed columns referenced by index.
270 nr1 = float(tbl_item[int(args[1])]["data"])
271 nr2 = float(tbl_item[int(args[2])]["data"])
273 tbl_item.append({"data": eval(operation)(nr1, nr2)})
275 tbl_item.append({"data": None})
276 except (IndexError, ValueError, TypeError):
277 logging.error("No data for {0}".format(tbl_item[0]["data"]))
278 tbl_item.append({"data": None})
281 logging.error("Not supported command {0}. Skipping the table.".
284 tbl_lst.append(tbl_item)
286 # Sort the table according to the relative change
# The relative change is the last column of every row; sort descending.
287 tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)
289 # Create the tables and write them to the files
291 "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
292 "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
293 "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
294 "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])
297 for file_name in file_names:
298 logging.info(" Writing the file '{0}'".format(file_name))
299 with open(file_name, "w") as file_handler:
300 file_handler.write(",".join(header) + "\n")
302 if isinstance(item[-1]["data"], float):
303 rel_change = round(item[-1]["data"], 1)
305 rel_change = item[-1]["data"]
# Partition rows into four files: NDR/PDR crossed with
# top (rel change >= 10%) / low (rel change < 10%).
306 if "ndr_top" in file_name \
307 and "ndr" in item[0]["data"] \
308 and rel_change >= 10.0:
309 _write_line_to_file(file_handler, item)
310 elif "pdr_top" in file_name \
311 and "pdr" in item[0]["data"] \
312 and rel_change >= 10.0:
313 _write_line_to_file(file_handler, item)
314 elif "ndr_low" in file_name \
315 and "ndr" in item[0]["data"] \
316 and rel_change < 10.0:
317 _write_line_to_file(file_handler, item)
318 elif "pdr_low" in file_name \
319 and "pdr" in item[0]["data"] \
320 and rel_change < 10.0:
321 _write_line_to_file(file_handler, item)
323 logging.info(" Done.")
326 def _read_csv_template(file_name):
327 """Read the template from a .csv file.
329 :param file_name: Name / full path / relative path of the file to read.
331 :returns: Data from the template as list (lines) of lists (items on line).
333 :raises: PresentationError if it is not possible to read the file.
337 with open(file_name, 'r') as csv_file:
339 for line in csv_file:
340 tmpl_data.append(line[:-1].split(","))
342 except IOError as err:
343 raise PresentationError(str(err), level="ERROR")
346 def table_performance_comparison(table, input_data):
347 """Generate the table(s) with algorithm: table_performance_comparison
348 specified in the specification file.
350 :param table: Table to generate.
351 :param input_data: Data to process.
352 :type table: pandas.Series
353 :type input_data: InputData
# NOTE(review): excerpt is sampled -- docstring closer, loop headers over
# builds, and several initialisations (tbl_dict, tbl_lst, lines, ...) are
# elided throughout this function.
356 logging.info(" Generating the table {0} ...".
357 format(table.get("title", "")))
360 data = input_data.filter_data(table, continue_on_error=True)
362 # Prepare the header of the tables
364 header = ["Test case", ]
# Optional "history" entries each add a Throughput/Stdev column pair in
# front of the reference/compare pairs.
366 history = table.get("history", None)
370 ["{0} Throughput [Mpps]".format(item["title"]),
371 "{0} Stdev [Mpps]".format(item["title"])])
373 ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
374 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
375 "{0} Throughput [Mpps]".format(table["compare"]["title"]),
376 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
378 header_str = ",".join(header) + "\n"
379 except (AttributeError, KeyError) as err:
380 logging.error("The model is invalid, missing parameter: {0}".
384 # Prepare data to the table:
# Pass 1: collect reference throughput samples per test name.
386 for job, builds in table["reference"]["data"].items():
388 for tst_name, tst_data in data[job][str(build)].iteritems():
389 if tbl_dict.get(tst_name, None) is None:
# Display name: first segment of the parent suite + test name
# tokens (the join's slice is elided).
390 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
391 "-".join(tst_data["name"].
393 tbl_dict[tst_name] = {"name": name,
397 tbl_dict[tst_name]["ref-data"].\
398 append(tst_data["throughput"]["value"])
400 pass # No data in output.xml for this test
# Pass 2: collect comparison samples; tests with no usable compare data
# are dropped from the table.
402 for job, builds in table["compare"]["data"].items():
404 for tst_name, tst_data in data[job][str(build)].iteritems():
406 tbl_dict[tst_name]["cmp-data"].\
407 append(tst_data["throughput"]["value"])
411 tbl_dict.pop(tst_name, None)
# Pass 3 (optional): collect per-title history samples.
414 for job, builds in item["data"].items():
416 for tst_name, tst_data in data[job][str(build)].iteritems():
417 if tbl_dict.get(tst_name, None) is None:
419 if tbl_dict[tst_name].get("history", None) is None:
420 tbl_dict[tst_name]["history"] = OrderedDict()
421 if tbl_dict[tst_name]["history"].get(item["title"],
423 tbl_dict[tst_name]["history"][item["title"]] = \
426 tbl_dict[tst_name]["history"][item["title"]].\
427 append(tst_data["throughput"]["value"])
428 except (TypeError, KeyError):
# Build rows: [name, (hist mean, hist stdev)*, ref mean, ref stdev,
# cmp mean, cmp stdev, relative change]; means/stdevs in Mpps.
432 for tst_name in tbl_dict.keys():
433 item = [tbl_dict[tst_name]["name"], ]
435 for hist_list in tbl_dict[tst_name]["history"].values():
436 for hist_data in hist_list:
438 data_t = remove_outliers(
439 hist_data, outlier_const=table["outlier-const"])
441 item.append(round(mean(data_t) / 1000000, 2))
442 item.append(round(stdev(data_t) / 1000000, 2))
444 item.extend([None, None])
446 item.extend([None, None])
447 if tbl_dict[tst_name]["ref-data"]:
448 data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
449 outlier_const=table["outlier-const"])
450 # TODO: Specify window size.
452 item.append(round(mean(data_t) / 1000000, 2))
453 item.append(round(stdev(data_t) / 1000000, 2))
455 item.extend([None, None])
457 item.extend([None, None])
458 if tbl_dict[tst_name]["cmp-data"]:
459 data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
460 outlier_const=table["outlier-const"])
461 # TODO: Specify window size.
463 item.append(round(mean(data_t) / 1000000, 2))
464 item.append(round(stdev(data_t) / 1000000, 2))
466 item.extend([None, None])
468 item.extend([None, None])
# item[-5] is the reference mean, item[-3] the compare mean.
469 if item[-5] is not None and item[-3] is not None and item[-5] != 0:
470 item.append(int(relative_change(float(item[-5]), float(item[-3]))))
471 if len(item) == len(header):
474 # Sort the table according to the relative change
475 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
# Six full .csv outputs: {ndr,pdr} x {1t1c,2t2c,4t4c}.
479 tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
480 table["output-file-ext"]),
481 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
482 table["output-file-ext"]),
483 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
484 table["output-file-ext"]),
485 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
486 table["output-file-ext"]),
487 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
488 table["output-file-ext"]),
489 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
490 table["output-file-ext"])
492 for file_name in tbl_names:
493 logging.info(" Writing file: '{0}'".format(file_name))
494 with open(file_name, "w") as file_handler:
495 file_handler.write(header_str)
# Route each row to the file matching its NDR/PDR marker and core
# count, then strip the core suffix from the displayed name.
497 if (file_name.split("-")[-3] in test[0] and # NDR vs PDR
498 file_name.split("-")[-2] in test[0]): # cores
499 test[0] = "-".join(test[0].split("-")[:-1])
500 file_handler.write(",".join([str(item) for item in test]) +
# Render each .csv as a pretty-printed .txt twin.
504 tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
505 "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
506 "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
507 "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
508 "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
509 "{0}-pdr-4t4c-full.txt".format(table["output-file"])
512 for i, txt_name in enumerate(tbl_names_txt):
514 logging.info(" Writing file: '{0}'".format(txt_name))
515 with open(tbl_names[i], 'rb') as csv_file:
516 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
517 for row in csv_content:
# First row seeds the PrettyTable header; the rest are rows.
518 if txt_table is None:
519 txt_table = prettytable.PrettyTable(row)
521 txt_table.add_row(row)
522 txt_table.align["Test case"] = "l"
523 with open(txt_name, "w") as txt_file:
524 txt_file.write(str(txt_table))
526 # Selected tests in csv:
# Top/bottom slices of the 1t1c files, limited to nr-of-tests-shown.
527 input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
528 table["output-file-ext"])
529 with open(input_file, "r") as in_file:
534 output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
535 table["output-file-ext"])
536 logging.info(" Writing file: '{0}'".format(output_file))
537 with open(output_file, "w") as out_file:
538 out_file.write(header_str)
539 for i, line in enumerate(lines[1:]):
540 if i == table["nr-of-tests-shown"]:
544 output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
545 table["output-file-ext"])
546 logging.info(" Writing file: '{0}'".format(output_file))
547 with open(output_file, "w") as out_file:
548 out_file.write(header_str)
# Iterate from the last line back to (but excluding) the header.
549 for i, line in enumerate(lines[-1:0:-1]):
550 if i == table["nr-of-tests-shown"]:
554 input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
555 table["output-file-ext"])
556 with open(input_file, "r") as in_file:
561 output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
562 table["output-file-ext"])
563 logging.info(" Writing file: '{0}'".format(output_file))
564 with open(output_file, "w") as out_file:
565 out_file.write(header_str)
566 for i, line in enumerate(lines[1:]):
567 if i == table["nr-of-tests-shown"]:
571 output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
572 table["output-file-ext"])
573 logging.info(" Writing file: '{0}'".format(output_file))
574 with open(output_file, "w") as out_file:
575 out_file.write(header_str)
576 for i, line in enumerate(lines[-1:0:-1]):
577 if i == table["nr-of-tests-shown"]:
582 def table_performance_comparison_mrr(table, input_data):
583 """Generate the table(s) with algorithm: table_performance_comparison_mrr
584 specified in the specification file.
586 :param table: Table to generate.
587 :param input_data: Data to process.
588 :type table: pandas.Series
589 :type input_data: InputData
# NOTE(review): excerpt is sampled -- docstring closer, per-build loop
# headers and several initialisations are elided.  MRR variant of
# table_performance_comparison: reads tst_data["result"]["throughput"]
# instead of tst_data["throughput"]["value"], and has no history columns.
592 logging.info(" Generating the table {0} ...".
593 format(table.get("title", "")))
596 data = input_data.filter_data(table, continue_on_error=True)
598 # Prepare the header of the tables
600 header = ["Test case",
601 "{0} Throughput [Mpps]".format(table["reference"]["title"]),
602 "{0} stdev [Mpps]".format(table["reference"]["title"]),
603 "{0} Throughput [Mpps]".format(table["compare"]["title"]),
604 "{0} stdev [Mpps]".format(table["compare"]["title"]),
606 header_str = ",".join(header) + "\n"
607 except (AttributeError, KeyError) as err:
608 logging.error("The model is invalid, missing parameter: {0}".
612 # Prepare data to the table:
# Pass 1: collect reference MRR samples per test name.
614 for job, builds in table["reference"]["data"].items():
616 for tst_name, tst_data in data[job][str(build)].iteritems():
617 if tbl_dict.get(tst_name, None) is None:
618 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
619 "-".join(tst_data["name"].
621 tbl_dict[tst_name] = {"name": name,
625 tbl_dict[tst_name]["ref-data"].\
626 append(tst_data["result"]["throughput"])
628 pass # No data in output.xml for this test
# Pass 2: collect comparison samples; tests with no usable compare data
# are dropped from the table.
630 for job, builds in table["compare"]["data"].items():
632 for tst_name, tst_data in data[job][str(build)].iteritems():
634 tbl_dict[tst_name]["cmp-data"].\
635 append(tst_data["result"]["throughput"])
639 tbl_dict.pop(tst_name, None)
# Build rows: [name, ref mean, ref stdev, cmp mean, cmp stdev, change];
# means/stdevs are converted to Mpps and rounded to 2 decimals.
642 for tst_name in tbl_dict.keys():
643 item = [tbl_dict[tst_name]["name"], ]
644 if tbl_dict[tst_name]["ref-data"]:
645 data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
646 outlier_const=table["outlier-const"])
647 # TODO: Specify window size.
649 item.append(round(mean(data_t) / 1000000, 2))
650 item.append(round(stdev(data_t) / 1000000, 2))
652 item.extend([None, None])
654 item.extend([None, None])
655 if tbl_dict[tst_name]["cmp-data"]:
656 data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
657 outlier_const=table["outlier-const"])
658 # TODO: Specify window size.
660 item.append(round(mean(data_t) / 1000000, 2))
661 item.append(round(stdev(data_t) / 1000000, 2))
663 item.extend([None, None])
665 item.extend([None, None])
# item[1] is the reference mean, item[3] the compare mean.
666 if item[1] is not None and item[3] is not None and item[1] != 0:
667 item.append(int(relative_change(float(item[1]), float(item[3]))))
671 # Sort the table according to the relative change
672 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
# Three .csv outputs, one per core count.
676 tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
677 table["output-file-ext"]),
678 "{0}-2t2c-full{1}".format(table["output-file"],
679 table["output-file-ext"]),
680 "{0}-4t4c-full{1}".format(table["output-file"],
681 table["output-file-ext"])
683 for file_name in tbl_names:
684 logging.info(" Writing file: '{0}'".format(file_name))
685 with open(file_name, "w") as file_handler:
686 file_handler.write(header_str)
# Route rows by core count; strip the core suffix from the name.
688 if file_name.split("-")[-2] in test[0]: # cores
689 test[0] = "-".join(test[0].split("-")[:-1])
690 file_handler.write(",".join([str(item) for item in test]) +
# Render each .csv as a pretty-printed .txt twin.
694 tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
695 "{0}-2t2c-full.txt".format(table["output-file"]),
696 "{0}-4t4c-full.txt".format(table["output-file"])
699 for i, txt_name in enumerate(tbl_names_txt):
701 logging.info(" Writing file: '{0}'".format(txt_name))
702 with open(tbl_names[i], 'rb') as csv_file:
703 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
704 for row in csv_content:
# First row seeds the PrettyTable header; the rest are rows.
705 if txt_table is None:
706 txt_table = prettytable.PrettyTable(row)
708 txt_table.add_row(row)
709 txt_table.align["Test case"] = "l"
710 with open(txt_name, "w") as txt_file:
711 txt_file.write(str(txt_table))
714 def table_performance_trending_dashboard(table, input_data):
715 """Generate the table(s) with algorithm: table_performance_comparison
716 specified in the specification file.
718 :param table: Table to generate.
719 :param input_data: Data to process.
720 :type table: pandas.Series
721 :type input_data: InputData
# NOTE(review): excerpt is sampled -- docstring closer, per-build loop
# headers and several initialisations (tbl_dict, tbl_lst, tbl_sorted,
# txt_table, "nan") are elided.
724 logging.info(" Generating the table {0} ...".
725 format(table.get("title", "")))
728 data = input_data.filter_data(table, continue_on_error=True)
730 # Prepare the header of the tables
731 header = ["Test Case",
733 "Short-Term Change [%]",
734 "Long-Term Change [%]",
739 header_str = ",".join(header) + "\n"
741 # Prepare data to the table:
# Collect per-build MRR throughput samples keyed by test name.
743 for job, builds in table["data"].items():
745 for tst_name, tst_data in data[job][str(build)].iteritems():
746 if tbl_dict.get(tst_name, None) is None:
747 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
748 "-".join(tst_data["name"].
750 tbl_dict[tst_name] = {"name": name,
753 tbl_dict[tst_name]["data"][str(build)] = \
754 tst_data["result"]["throughput"]
755 except (TypeError, KeyError):
756 pass # No data in output.xml for this test
# Trend analysis: needs at least 3 samples per test.
759 for tst_name in tbl_dict.keys():
760 if len(tbl_dict[tst_name]["data"]) > 2:
762 pd_data = pd.Series(tbl_dict[tst_name]["data"])
763 last_key = pd_data.keys()[-1]
# Short-term window is capped by the available sample count.
764 win_size = min(pd_data.size, table["window"])
765 win_first_idx = pd_data.size - win_size
766 key_14 = pd_data.keys()[win_first_idx]
767 long_win_size = min(pd_data.size, table["long-trend-window"])
769 data_t, _ = split_outliers(pd_data, outlier_const=1.5,
# Rolling median/stdev over the short-term window drive the
# regression/progression classification below.
772 median_t = data_t.rolling(window=win_size, min_periods=2).median()
773 stdev_t = data_t.rolling(window=win_size, min_periods=2).std()
774 median_first_idx = pd_data.size - long_win_size
776 max_median = max([x for x in median_t.values[median_first_idx:]
781 last_median_t = median_t[last_key]
785 median_t_14 = median_t[key_14]
790 name = tbl_dict[tst_name]["name"]
# Debug dump of the intermediate series (verbose; consider DEBUG level).
792 logging.info("{}".format(name))
793 logging.info("pd_data : {}".format(pd_data))
794 logging.info("data_t : {}".format(data_t))
795 logging.info("median_t : {}".format(median_t))
796 logging.info("last_median_t : {}".format(last_median_t))
797 logging.info("median_t_14 : {}".format(median_t_14))
798 logging.info("max_median : {}".format(max_median))
800 # Classification list:
# Each sample is labelled outlier / regression / progression / normal
# against a 3-sigma band around the rolling median.
801 classification_lst = list()
802 for build_nr, value in pd_data.iteritems():
804 if isnan(data_t[build_nr]) \
805 or isnan(median_t[build_nr]) \
806 or isnan(stdev_t[build_nr]) \
808 classification_lst.append("outlier")
809 elif value < (median_t[build_nr] - 3 * stdev_t[build_nr]):
810 classification_lst.append("regression")
811 elif value > (median_t[build_nr] + 3 * stdev_t[build_nr]):
812 classification_lst.append("progression")
814 classification_lst.append("normal")
# Short-term change: last median vs median at the window start.
816 if isnan(last_median_t) or isnan(median_t_14) or median_t_14 == 0.0:
817 rel_change_last = nan
819 rel_change_last = round(
820 ((last_median_t - median_t_14) / median_t_14) * 100, 2)
# Long-term change: last median vs the long-window maximum median.
822 if isnan(max_median) or isnan(last_median_t) or max_median == 0.0:
823 rel_change_long = nan
825 rel_change_long = round(
826 ((last_median_t - max_median) / max_median) * 100, 2)
828 logging.info("rel_change_last : {}".format(rel_change_last))
829 logging.info("rel_change_long : {}".format(rel_change_long))
# Row: name, last median [Mpps], short/long-term change, and counts of
# regressions / progressions / outliers inside the short-term window.
833 '-' if isnan(last_median_t) else
834 round(last_median_t / 1000000, 2),
835 '-' if isnan(rel_change_last) else rel_change_last,
836 '-' if isnan(rel_change_long) else rel_change_long,
837 classification_lst[win_first_idx:].count("regression"),
838 classification_lst[win_first_idx:].count("progression"),
839 classification_lst[win_first_idx:].count("outlier")])
841 tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-key ordering: most regressions first, then progressions,
# then outliers.
844 for nrr in range(table["window"], -1, -1):
845 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
846 for nrp in range(table["window"], -1, -1):
847 tbl_pro = [item for item in tbl_reg if item[5] == nrp]
848 for nro in range(table["window"], -1, -1):
# NOTE(review): item[5] is compared here although item[6] holds the
# outlier count -- looks like a copy/paste bug; confirm intent.
849 tbl_out = [item for item in tbl_pro if item[5] == nro]
850 tbl_sorted.extend(tbl_out)
852 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
854 logging.info(" Writing file: '{0}'".format(file_name))
855 with open(file_name, "w") as file_handler:
856 file_handler.write(header_str)
857 for test in tbl_sorted:
858 file_handler.write(",".join([str(item) for item in test]) + '\n')
# Pretty-printed .txt twin of the .csv dashboard.
860 txt_file_name = "{0}.txt".format(table["output-file"])
862 logging.info(" Writing file: '{0}'".format(txt_file_name))
863 with open(file_name, 'rb') as csv_file:
864 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
865 for row in csv_content:
866 if txt_table is None:
867 txt_table = prettytable.PrettyTable(row)
869 txt_table.add_row(row)
870 txt_table.align["Test case"] = "l"
871 with open(txt_file_name, "w") as txt_file:
872 txt_file.write(str(txt_table))
875 def table_performance_trending_dashboard_html(table, input_data):
876 """Generate the table(s) with algorithm:
877 table_performance_trending_dashboard_html specified in the specification
880 :param table: Table to generate.
881 :param input_data: Data to process.
882 :type table: pandas.Series
883 :type input_data: InputData
# NOTE(review): excerpt is sampled -- docstring closer, several guard
# lines (empty csv_lst, "url =" base, anchor assignments) are elided.
886 logging.info(" Generating the table {0} ...".
887 format(table.get("title", "")))
# Load the previously generated dashboard .csv as a list of rows.
890 with open(table["input-file"], 'rb') as csv_file:
891 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
892 csv_lst = [item for item in csv_content]
894 logging.warning("The input file is not defined.")
896 except csv.Error as err:
897 logging.warning("Not possible to process the file '{0}'.\n{1}".
898 format(table["input-file"], err))
# Build an HTML <table> via ElementTree: header row first.
902 dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
905 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
906 for idx, item in enumerate(csv_lst[0]):
907 alignment = "left" if idx == 0 else "center"
908 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Data rows with alternating background colour.
912 for r_idx, row in enumerate(csv_lst[1:]):
913 background = "#D4E4F7" if r_idx % 2 else "white"
914 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
917 for c_idx, item in enumerate(row):
918 alignment = "left" if c_idx == 0 else "center"
919 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# The first cell (test name) becomes a hyperlink; the target page is
# picked by substring-matching the test name against known suite tags.
927 file_name = "container_memif.html"
929 elif "vhost" in item:
930 if "l2xcbase" in item or "l2bdbasemaclrn" in item:
931 file_name = "vm_vhost_l2.html"
932 elif "ip4base" in item:
933 file_name = "vm_vhost_ip4.html"
935 elif "ipsec" in item:
936 file_name = "ipsec.html"
938 elif "ethip4lispip" in item or "ethip4vxlan" in item:
939 file_name = "ip4_tunnels.html"
941 elif "ip4base" in item or "ip4scale" in item:
942 file_name = "ip4.html"
943 if "iacl" in item or "snat" in item or "cop" in item:
944 feature = "-features"
946 elif "ip6base" in item or "ip6scale" in item:
947 file_name = "ip6.html"
949 elif "l2xcbase" in item or "l2xcscale" in item \
950 or "l2bdbasemaclrn" in item or "l2bdscale" in item \
# NOTE(review): "l2db..." here looks like a typo of "l2bd..." -- it can
# never match if the tag set only contains l2bd names; confirm.
951 or "l2dbbasemaclrn" in item or "l2dbscale" in item:
952 file_name = "l2.html"
954 feature = "-features"
# NIC / frame-size specific anchors (most branches elided).
960 elif "xl710" in item:
969 elif "9000b" in item:
981 url = url + file_name + anchor + feature
983 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Emit the table as an rST "raw html" block.
990 with open(table["output-file"], 'w') as html_file:
991 logging.info(" Writing file: '{0}'".
992 format(table["output-file"]))
993 html_file.write(".. raw:: html\n\n\t")
994 html_file.write(ET.tostring(dashboard))
995 html_file.write("\n\t<p><br><br></p>\n")
997 logging.warning("The output file is not defined.")