# Copyright (c) 2017 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Algorithms to generate tables.
"""

import logging
import csv
import prettytable
import pandas as pd
from string import replace
from math import isnan
from numpy import nan
from collections import OrderedDict
from xml.etree import ElementTree as ET
from errors import PresentationError
from utils import mean, stdev, relative_change, remove_outliers, split_outliers

def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        try:
            eval(table["algorithm"])(table, data)
        except NameError:
            logging.error("The algorithm '{0}' is not defined.".
                          format(table["algorithm"]))
    logging.info("Done.")
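
# Note: generate_tables() dispatches purely on the "algorithm" key, so each
# table entry in the specification file is assumed to carry at least that key
# plus the output-related keys used below, roughly (illustrative only):
#   {"algorithm": "table_details", "title": "Detailed Test Results",
#    "output-file": "...", "output-file-ext": ".csv",
#    "columns": [...], "data": {...}}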

def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("  No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.iteritems():
        # Generate one table per suite
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info("      Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in table_lst:
                file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
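
# Note: a "columns" item in the table specification is assumed to look like
#   {"title": "Documentation", "data": "data doc"}
# where the second word of the "data" string names the field read from the
# test data (illustrative keys; the real ones come from the specification
# file).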

def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.iteritems():
        # Generate one table per suite
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info("      Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in table_lst:
                file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")

def table_performance_improvements(table, input_data):
    """Generate the table(s) with algorithm: table_performance_improvements
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    def _write_line_to_file(file_handler, data):
        """Write a line to the .csv file.

        :param file_handler: File handler for the csv file. It must be open for
            writing text.
        :param data: Item to be written to the file.
        :type file_handler: BinaryIO
        :type data: list
        """

        line_lst = list()
        for item in data:
            if isinstance(item["data"], str):
                # Remove -?drdisc from the end
                if item["data"].endswith("drdisc"):
                    item["data"] = item["data"][:-8]
                line_lst.append(item["data"])
            elif isinstance(item["data"], float):
                line_lst.append("{:.1f}".format(item["data"]))
            elif item["data"] is None:
                line_lst.append("")
        file_handler.write(",".join(line_lst) + "\n")

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the template
    file_name = table.get("template", None)
    if file_name:
        try:
            tmpl = _read_csv_template(file_name)
        except PresentationError:
            logging.error("  The template '{0}' does not exist. Skipping the "
                          "table.".format(file_name))
            return None
    else:
        logging.error("The template is not defined. Skipping the table.")
        return None

    # Transform the data
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append(column["title"])

    # Generate the data for the table according to the model in the table
    # specification
    tbl_lst = list()
    for tmpl_item in tmpl:
        tbl_item = list()
        for column in table["columns"]:
            cmd = column["data"].split(" ")[0]
            args = column["data"].split(" ")[1:]
            if cmd == "template":
                try:
                    val = float(tmpl_item[int(args[0])])
                except ValueError:
                    val = tmpl_item[int(args[0])]
                tbl_item.append({"data": val})
            elif cmd == "data":
                jobs = args[0:-1]
                operation = args[-1]
                data_lst = list()
                for job in jobs:
                    for build in data[job]:
                        try:
                            data_lst.append(float(build[tmpl_item[0]]
                                                  ["throughput"]["value"]))
                        except (KeyError, TypeError):
                            # No data in this build for the given test
                            pass
                if data_lst:
                    tbl_item.append({"data": (eval(operation)(data_lst)) /
                                             1000000})
                else:
                    tbl_item.append({"data": None})
            elif cmd == "operation":
                operation = args[0]
                try:
                    nr1 = float(tbl_item[int(args[1])]["data"])
                    nr2 = float(tbl_item[int(args[2])]["data"])
                    if nr1 and nr2:
                        tbl_item.append({"data": eval(operation)(nr1, nr2)})
                    else:
                        tbl_item.append({"data": None})
                except (IndexError, ValueError, TypeError):
                    logging.error("No data for {0}".format(tbl_item[0]["data"]))
                    tbl_item.append({"data": None})
            else:
                logging.error("Not supported command {0}. Skipping the table.".
                              format(cmd))
                return None
        tbl_lst.append(tbl_item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)

    # Create the tables and write them to the files
    file_names = [
        "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])
    ]

    for file_name in file_names:
        logging.info("    Writing the file '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in tbl_lst:
                if isinstance(item[-1]["data"], float):
                    rel_change = round(item[-1]["data"], 1)
                else:
                    rel_change = item[-1]["data"]
                if "ndr_top" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_top" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "ndr_low" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_low" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)

    logging.info("  Done.")
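
# The column "data" strings handled above take one of three forms
# (illustrative; the exact job names and operations come from the
# specification file):
#   "template <index>"             - copy the <index>-th cell of the template
#                                    row
#   "data <job> ... <operation>"   - aggregate the measured throughput of the
#                                    listed jobs with e.g. "mean"
#   "operation <op> <idx1> <idx2>" - derive a cell from two already computed
#                                    cells, e.g. "operation relative_change 1 2"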

def _read_csv_template(file_name):
    """Read the template from a .csv file.

    :param file_name: Name / full path / relative path of the file to read.
    :type file_name: str
    :returns: Data from the template as list (lines) of lists (items on line).
    :rtype: list
    :raises: PresentationError if it is not possible to read the file.
    """

    try:
        with open(file_name, 'r') as csv_file:
            tmpl_data = list()
            for line in csv_file:
                tmpl_data.append(line[:-1].split(","))
        return tmpl_data
    except IOError as err:
        raise PresentationError(str(err), level="ERROR")
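
# Note: each template row is a plain list of comma-separated cells; the first
# cell is used as the test name when measured data is looked up in
# table_performance_improvements (tmpl_item[0]), and "template N" copies the
# N-th cell into the generated table.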

def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} Throughput [Mpps]".format(item["title"]),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} Throughput [Mpps]".format(table["compare"]["title"]),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return None

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[:-1]))
                    tbl_dict[tst_name] = {"name": name,
                                          "ref-data": list(),
                                          "cmp-data": list()}
                try:
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["throughput"]["value"])
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                try:
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["throughput"]["value"])
                except KeyError:
                    pass
                except TypeError:
                    tbl_dict.pop(tst_name, None)

    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        if tbl_dict[tst_name].get("history", None) is None:
                            tbl_dict[tst_name]["history"] = OrderedDict()
                        if tbl_dict[tst_name]["history"].get(item["title"],
                                                             None) is None:
                            tbl_dict[tst_name]["history"][item["title"]] = \
                                list()
                        try:
                            tbl_dict[tst_name]["history"][item["title"]].\
                                append(tst_data["throughput"]["value"])
                        except (TypeError, KeyError):
                            pass

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_list in tbl_dict[tst_name]["history"].values():
                    # hist_list holds all throughput samples collected for
                    # one historical release
                    if hist_list:
                        data_t = remove_outliers(
                            hist_list, outlier_const=table["outlier-const"])
                        if data_t:
                            item.append(round(mean(data_t) / 1000000, 2))
                            item.append(round(stdev(data_t) / 1000000, 2))
                        else:
                            item.extend([None, None])
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        if tbl_dict[tst_name]["ref-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        if tbl_dict[tst_name]["cmp-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        # Compare the reference mean (item[-4]) with the compare mean
        # (item[-2]):
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate tables:
    # All tests in csv:
    tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
                 ]
    for file_name in tbl_names:
        logging.info("      Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            for test in tbl_lst:
                if (file_name.split("-")[-3] in test[0] and    # NDR vs PDR
                        file_name.split("-")[-2] in test[0]):  # cores
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item) for item in test]) +
                                       "\n")

    # All tests in txt:
    tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
                     "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-pdr-4t4c-full.txt".format(table["output-file"])
                     ]

    for i, txt_name in enumerate(tbl_names_txt):
        txt_table = None
        logging.info("      Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                else:
                    txt_table.add_row(row)
            txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))

    # Selected tests in csv:
    input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
        lines = list()
        for line in in_file:
            lines.append(line)

    output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
        lines = list()
        for line in in_file:
            lines.append(line)

    output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)
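
# Note: relative_change(ref, cmp) from utils is assumed to express the change
# of the compare value against the reference value in percent, e.g.
# ref = 10.0 Mpps and cmp = 11.0 Mpps giving +10; the "Delta [%]" column above
# and the 10.0 thresholds in table_performance_improvements rely on that
# convention.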

def table_performance_comparison_mrr(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison_mrr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case",
                  "{0} Throughput [Mpps]".format(table["reference"]["title"]),
                  "{0} stdev [Mpps]".format(table["reference"]["title"]),
                  "{0} Throughput [Mpps]".format(table["compare"]["title"]),
                  "{0} stdev [Mpps]".format(table["compare"]["title"]),
                  "Delta [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return None

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[:-1]))
                    tbl_dict[tst_name] = {"name": name,
                                          "ref-data": list(),
                                          "cmp-data": list()}
                try:
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["result"]["throughput"])
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                try:
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["result"]["throughput"])
                except KeyError:
                    pass
                except TypeError:
                    tbl_dict.pop(tst_name, None)

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if tbl_dict[tst_name]["ref-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        if tbl_dict[tst_name]["cmp-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        else:
            item.extend([None, None])
        if item[1] is not None and item[3] is not None and item[1] != 0:
            item.append(int(relative_change(float(item[1]), float(item[3]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate tables:
    # All tests in csv:
    tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-2t2c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-4t4c-full{1}".format(table["output-file"],
                                           table["output-file-ext"])
                 ]
    for file_name in tbl_names:
        logging.info("      Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            for test in tbl_lst:
                if file_name.split("-")[-2] in test[0]:  # cores
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item) for item in test]) +
                                       "\n")

    # All tests in txt:
    tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
                     "{0}-2t2c-full.txt".format(table["output-file"]),
                     "{0}-4t4c-full.txt".format(table["output-file"])
                     ]

    for i, txt_name in enumerate(tbl_names_txt):
        txt_table = None
        logging.info("      Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                else:
                    txt_table.add_row(row)
            txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))
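
# Note: rows are routed to the per-core files above by substring match, e.g. a
# test whose displayed name contains "2t2c" lands in the "...-2t2c-full" csv
# (illustrative); the last dash-separated token of the name is then stripped
# before the row is written.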

def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]",
              "Outliers [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[:-1]))
                    tbl_dict[tst_name] = {"name": name,
                                          "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["throughput"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        if len(tbl_dict[tst_name]["data"]) > 2:

            pd_data = pd.Series(tbl_dict[tst_name]["data"])
            last_key = pd_data.keys()[-1]
            win_size = min(pd_data.size, table["window"])
            win_first_idx = pd_data.size - win_size
            key_14 = pd_data.keys()[win_first_idx]
            long_win_size = min(pd_data.size, table["long-trend-window"])

            data_t, _ = split_outliers(pd_data, outlier_const=1.5,
                                       window=win_size)

            median_t = data_t.rolling(window=win_size, min_periods=2).median()
            stdev_t = data_t.rolling(window=win_size, min_periods=2).std()
            median_first_idx = pd_data.size - long_win_size
            try:
                max_median = max([x for x in median_t.values[median_first_idx:]
                                  if not isnan(x)])
            except ValueError:
                max_median = nan
            try:
                last_median_t = median_t[last_key]
            except KeyError:
                last_median_t = nan
            try:
                median_t_14 = median_t[key_14]
            except KeyError:
                median_t_14 = nan

            name = tbl_dict[tst_name]["name"]

            # Debug output:
            logging.info("{}".format(name))
            logging.info("pd_data : {}".format(pd_data))
            logging.info("data_t : {}".format(data_t))
            logging.info("median_t : {}".format(median_t))
            logging.info("last_median_t : {}".format(last_median_t))
            logging.info("median_t_14 : {}".format(median_t_14))
            logging.info("max_median : {}".format(max_median))

            # Classification list:
            classification_lst = list()
            for build_nr, value in pd_data.iteritems():
                if isnan(data_t[build_nr]) \
                        or isnan(median_t[build_nr]) \
                        or isnan(stdev_t[build_nr]) \
                        or isnan(value):
                    classification_lst.append("outlier")
                elif value < (median_t[build_nr] - 3 * stdev_t[build_nr]):
                    classification_lst.append("regression")
                elif value > (median_t[build_nr] + 3 * stdev_t[build_nr]):
                    classification_lst.append("progression")
                else:
                    classification_lst.append("normal")

            if isnan(last_median_t) or isnan(median_t_14) or median_t_14 == 0.0:
                rel_change_last = nan
            else:
                rel_change_last = round(
                    ((last_median_t - median_t_14) / median_t_14) * 100, 2)

            if isnan(max_median) or isnan(last_median_t) or max_median == 0.0:
                rel_change_long = nan
            else:
                rel_change_long = round(
                    ((last_median_t - max_median) / max_median) * 100, 2)

            logging.info("rel_change_last : {}".format(rel_change_last))
            logging.info("rel_change_long : {}".format(rel_change_long))

            tbl_lst.append(
                [name,
                 '-' if isnan(last_median_t) else
                 round(last_median_t / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
                 classification_lst[win_first_idx:].count("regression"),
                 classification_lst[win_first_idx:].count("progression"),
                 classification_lst[win_first_idx:].count("outlier")])

    tbl_lst.sort(key=lambda rel: rel[0])

    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_pro = [item for item in tbl_reg if item[5] == nrp]
            for nro in range(table["window"], -1, -1):
                tbl_out = [item for item in tbl_pro if item[6] == nro]
                tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("      Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    txt_table = None
    logging.info("      Writing file: '{0}'".format(txt_file_name))
    with open(file_name, 'rb') as csv_file:
        csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
        for row in csv_content:
            if txt_table is None:
                txt_table = prettytable.PrettyTable(row)
            else:
                txt_table.add_row(row)
        txt_table.align["Test case"] = "l"
    with open(txt_file_name, "w") as txt_file:
        txt_file.write(str(txt_table))
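
# Classification example (illustrative numbers): with a rolling median of
# 9.0 Mpps and a rolling stdev of 0.2 Mpps, a sample below
# 9.0 - 3 * 0.2 = 8.4 Mpps is counted as a regression and a sample above
# 9.0 + 3 * 0.2 = 9.6 Mpps as a progression; everything else within the window
# is classified as normal (NaN samples are counted as outliers).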

def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows:
    for r_idx, row in enumerate(csv_lst[1:]):
        background = "#D4E4F7" if r_idx % 2 else "white"
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name: turn the test name into a link to the trending graph
            url = "../trending/"
            file_name = ""
            anchor = "#"
            feature = ""
            if c_idx == 0:
                if "memif" in item:
                    file_name = "container_memif.html"
                elif "vhost" in item:
                    if "l2xcbase" in item or "l2bdbasemaclrn" in item:
                        file_name = "vm_vhost_l2.html"
                    elif "ip4base" in item:
                        file_name = "vm_vhost_ip4.html"
                elif "ipsec" in item:
                    file_name = "ipsec.html"
                elif "ethip4lispip" in item or "ethip4vxlan" in item:
                    file_name = "ip4_tunnels.html"
                elif "ip4base" in item or "ip4scale" in item:
                    file_name = "ip4.html"
                    if "iacl" in item or "snat" in item or "cop" in item:
                        feature = "-features"
                elif "ip6base" in item or "ip6scale" in item:
                    file_name = "ip6.html"
                elif "l2xcbase" in item or "l2xcscale" in item \
                        or "l2bdbasemaclrn" in item or "l2bdscale" in item \
                        or "l2dbbasemaclrn" in item or "l2dbscale" in item:
                    file_name = "l2.html"
                    if "iacl" in item:
                        feature = "-features"

                if "x520" in item:
                    anchor += "x520-"
                elif "x710" in item:
                    anchor += "x710-"
                elif "xl710" in item:
                    anchor += "xl710-"

                if "64b" in item:
                    anchor += "64b-"
                elif "78b" in item:
                    anchor += "78b-"
                elif "imix" in item:
                    anchor += "imix-"
                elif "9000b" in item:
                    anchor += "9000b-"

                if "1t1c" in item:
                    anchor += "1t1c"
                elif "2t2c" in item:
                    anchor += "2t2c"
                elif "4t4c" in item:
                    anchor += "4t4c"

                url = url + file_name + anchor + feature
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item

    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("      Writing file: '{0}'".
                         format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
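
# The dashboard is emitted as an rST ".. raw:: html" block; each cell of the
# first column ends up roughly as (illustrative)
#   <td align="left"><a href="../trending/ip4.html#x520-64b-1t1c">name</a></td>
# so the generated anchor is assumed to match the section anchors used by the
# trending pages.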