1 # Copyright (c) 2017 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
23 from string import replace
24 from math import isnan
25 from collections import OrderedDict
27 from xml.etree import ElementTree as ET
29 from errors import PresentationError
30 from utils import mean, stdev, relative_change, remove_outliers, split_outliers
# Entry point: walks spec.tables and dispatches each table to the generator
# function named in table["algorithm"] via eval().  SECURITY/robustness note:
# eval() on a spec-supplied name executes arbitrary code if the specification
# file is untrusted — a dispatch dict of allowed functions would be safer.
# NOTE(review): this listing is elided (original line numbers jump 43->45->47);
# the try/except presumably wrapping the eval() call is not visible here.
33 def generate_tables(spec, data):
34 """Generate all tables specified in the specification file.
36 :param spec: Specification read from the specification file.
37 :param data: Data to process.
38 :type spec: Specification
42 logging.info("Generating the tables ...")
43 for table in spec.tables:
45 eval(table["algorithm"])(table, data)
# Logged when the algorithm name does not resolve (handler header elided).
47 logging.error("The algorithm '{0}' is not defined.".
48 format(table["algorithm"]))
# Builds one CSV file per suite with detailed per-test results taken from a
# single (job, build) pair — the first job key and its first build in
# table["data"].  Each cell is double-quoted with embedded '"' escaped as '""'
# (CSV quoting done by hand rather than via the csv module).
# NOTE(review): elided listing — initializations of `header`, `row_lst` and
# `table_lst`, plus some branch headers, are not visible in this view.
52 def table_details(table, input_data):
53 """Generate the table(s) with algorithm: table_detailed_test_results
54 specified in the specification file.
56 :param table: Table to generate.
57 :param input_data: Data to process.
58 :type table: pandas.Series
59 :type input_data: InputData
62 logging.info(" Generating the table {0} ...".
63 format(table.get("title", "")))
66 data = input_data.filter_data(table)
68 # Prepare the header of the tables
70 for column in table["columns"]:
71 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
73 # Generate the data for the table according to the model in the table
# Python 2: dict.keys() returns a list, so [0] picks an arbitrary-but-first
# job; only the first build of that job is used.
75 job = table["data"].keys()[0]
76 build = str(table["data"][job][0])
78 suites = input_data.suites(job, build)
80 logging.error(" No data available. The table will not be generated.")
83 for suite_longname, suite in suites.iteritems():
85 suite_name = suite["name"]
87 for test in data[job][build].keys():
88 if data[job][build][test]["parent"] in suite_name:
90 for column in table["columns"]:
# column["data"] is a space-separated spec string; token [1] names the
# field of the test record to render.
92 col_data = str(data[job][build][test][column["data"].
93 split(" ")[1]]).replace('"', '""')
94 if column["data"].split(" ")[1] in ("vat-history",
# string.replace(...) — deprecated Py2 function form of str.replace;
# strips " |br| " markers and wraps the cell in |prein|/|preout|.
96 col_data = replace(col_data, " |br| ", "",
98 col_data = " |prein| {0} |preout| ".\
100 row_lst.append('"{0}"'.format(col_data))
102 row_lst.append("No data")
103 table_lst.append(row_lst)
105 # Write the data to file
107 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
108 table["output-file-ext"])
109 logging.info(" Writing file: '{}'".format(file_name))
110 with open(file_name, "w") as file_handler:
111 file_handler.write(",".join(header) + "\n")
112 for item in table_lst:
113 file_handler.write(",".join(item) + "\n")
115 logging.info(" Done.")
# Same CSV-per-suite generation as table_details, but first merges data across
# all jobs/builds (input_data.merge_data) so the test dict is flat (indexed by
# test name only, no [job][build] level).  Cell quoting/escaping is identical.
# NOTE(review): elided listing — `header`/`row_lst`/`table_lst` initializations
# and some branch headers are not visible in this view.
118 def table_merged_details(table, input_data):
119 """Generate the table(s) with algorithm: table_merged_details
120 specified in the specification file.
122 :param table: Table to generate.
123 :param input_data: Data to process.
124 :type table: pandas.Series
125 :type input_data: InputData
128 logging.info(" Generating the table {0} ...".
129 format(table.get("title", "")))
132 data = input_data.filter_data(table)
133 data = input_data.merge_data(data)
# pandas sort for deterministic row order after the merge.
134 data.sort_index(inplace=True)
136 suites = input_data.filter_data(table, data_set="suites")
137 suites = input_data.merge_data(suites)
139 # Prepare the header of the tables
141 for column in table["columns"]:
142 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
144 for _, suite in suites.iteritems():
146 suite_name = suite["name"]
148 for test in data.keys():
149 if data[test]["parent"] in suite_name:
151 for column in table["columns"]:
153 col_data = str(data[test][column["data"].
154 split(" ")[1]]).replace('"', '""')
155 if column["data"].split(" ")[1] in ("vat-history",
# Deprecated string.replace(...) function form; strips |br| markers.
157 col_data = replace(col_data, " |br| ", "",
# [:-5] apparently drops a trailing " |br|" left by the maxreplace
# limit — TODO confirm against the unelided original.
159 col_data = " |prein| {0} |preout| ".\
160 format(col_data[:-5])
161 row_lst.append('"{0}"'.format(col_data))
163 row_lst.append("No data")
164 table_lst.append(row_lst)
166 # Write the data to file
168 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
169 table["output-file-ext"])
170 logging.info(" Writing file: '{}'".format(file_name))
171 with open(file_name, "w") as file_handler:
172 file_handler.write(",".join(header) + "\n")
173 for item in table_lst:
174 file_handler.write(",".join(item) + "\n")
176 logging.info(" Done.")
# Builds performance-improvement tables from a CSV template: each template row
# is augmented per column spec ("template" = copy template cell, data command =
# aggregate throughput over builds, "operation" = apply a named function to two
# earlier cells), then the rows are split into ndr/pdr top/low files by a 10.0
# relative-change threshold.  eval() is used to resolve the operation name from
# the spec — unsafe if the spec is untrusted.
# NOTE(review): elided listing — try headers, `else:` lines, `line_lst`/
# `tbl_item`/`data_lst` initializations and the file_names list header are
# not visible in this view.
179 def table_performance_improvements(table, input_data):
180 """Generate the table(s) with algorithm: table_performance_improvements
181 specified in the specification file.
183 :param table: Table to generate.
184 :param input_data: Data to process.
185 :type table: pandas.Series
186 :type input_data: InputData
# Nested helper: serializes one row of {"data": ...} cells to the open CSV.
189 def _write_line_to_file(file_handler, data):
190 """Write a line to the .csv file.
192 :param file_handler: File handler for the csv file. It must be open for
194 :param data: Item to be written to the file.
195 :type file_handler: BinaryIO
201 if isinstance(item["data"], str):
202 # Remove -?drdisc from the end
# [:-8] strips 8 chars ("-ndrdisc"/"-pdrdisc"), though endswith only
# checks the 7-char "drdisc" suffix — intentional per the comment above.
203 if item["data"].endswith("drdisc"):
204 item["data"] = item["data"][:-8]
205 line_lst.append(item["data"])
206 elif isinstance(item["data"], float):
# Floats rendered with one decimal place.
207 line_lst.append("{:.1f}".format(item["data"]))
208 elif item["data"] is None:
210 file_handler.write(",".join(line_lst) + "\n")
212 logging.info(" Generating the table {0} ...".
213 format(table.get("title", "")))
216 file_name = table.get("template", None)
219 tmpl = _read_csv_template(file_name)
220 except PresentationError:
221 logging.error(" The template '{0}' does not exist. Skipping the "
222 "table.".format(file_name))
# Reached when table has no "template" key (branch header elided).
225 logging.error("The template is not defined. Skipping the table.")
229 data = input_data.filter_data(table)
231 # Prepare the header of the tables
233 for column in table["columns"]:
234 header.append(column["title"])
236 # Generate the data for the table according to the model in the table
239 for tmpl_item in tmpl:
241 for column in table["columns"]:
# Column spec format: "<command> <arg> ...".
242 cmd = column["data"].split(" ")[0]
243 args = column["data"].split(" ")[1:]
244 if cmd == "template":
246 val = float(tmpl_item[int(args[0])])
248 val = tmpl_item[int(args[0])]
249 tbl_item.append({"data": val})
255 for build in data[job]:
# tmpl_item[0] is the test name used to look up throughput per build.
257 data_lst.append(float(build[tmpl_item[0]]
258 ["throughput"]["value"]))
259 except (KeyError, TypeError):
# `operation` (e.g. "mean") resolved by eval(); divisor elided here.
263 tbl_item.append({"data": (eval(operation)(data_lst)) /
266 tbl_item.append({"data": None})
267 elif cmd == "operation":
270 nr1 = float(tbl_item[int(args[1])]["data"])
271 nr2 = float(tbl_item[int(args[2])]["data"])
273 tbl_item.append({"data": eval(operation)(nr1, nr2)})
275 tbl_item.append({"data": None})
276 except (IndexError, ValueError, TypeError):
277 logging.error("No data for {0}".format(tbl_item[0]["data"]))
278 tbl_item.append({"data": None})
281 logging.error("Not supported command {0}. Skipping the table.".
284 tbl_lst.append(tbl_item)
286 # Sort the table according to the relative change
# Last cell of each row holds the relative change; descending order.
287 tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)
289 # Create the tables and write them to the files
291 "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
292 "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
293 "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
294 "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])
297 for file_name in file_names:
298 logging.info(" Writing the file '{0}'".format(file_name))
299 with open(file_name, "w") as file_handler:
300 file_handler.write(",".join(header) + "\n")
302 if isinstance(item[-1]["data"], float):
303 rel_change = round(item[-1]["data"], 1)
305 rel_change = item[-1]["data"]
# Route each row to top (>= 10.0% change) or low (< 10.0%) variant of
# the matching ndr/pdr file; item[0]["data"] is the test name.
306 if "ndr_top" in file_name \
307 and "ndr" in item[0]["data"] \
308 and rel_change >= 10.0:
309 _write_line_to_file(file_handler, item)
310 elif "pdr_top" in file_name \
311 and "pdr" in item[0]["data"] \
312 and rel_change >= 10.0:
313 _write_line_to_file(file_handler, item)
314 elif "ndr_low" in file_name \
315 and "ndr" in item[0]["data"] \
316 and rel_change < 10.0:
317 _write_line_to_file(file_handler, item)
318 elif "pdr_low" in file_name \
319 and "pdr" in item[0]["data"] \
320 and rel_change < 10.0:
321 _write_line_to_file(file_handler, item)
323 logging.info(" Done.")
# Reads a CSV template by hand (line[:-1] strips the trailing newline, then a
# plain comma split — no quoting support, unlike the csv module), wrapping any
# IOError in the project's PresentationError.
# NOTE(review): elided listing — the `try:` header, `tmpl_data = []` init and
# the `return tmpl_data` are not visible in this view.
326 def _read_csv_template(file_name):
327 """Read the template from a .csv file.
329 :param file_name: Name / full path / relative path of the file to read.
331 :returns: Data from the template as list (lines) of lists (items on line).
333 :raises: PresentationError if it is not possible to read the file.
337 with open(file_name, 'r') as csv_file:
339 for line in csv_file:
340 tmpl_data.append(line[:-1].split(","))
342 except IOError as err:
343 raise PresentationError(str(err), level="ERROR")
# Compares NDR/PDR throughput between a "reference" and a "compare" data set
# (plus optional "history" columns): per test it collects throughput samples,
# strips outliers, reports mean/stdev in Mpps, and appends the relative change
# reference->compare.  Output: six full per-core CSVs, pretty-printed .txt
# versions, and top/bottom 1t1c excerpt files.
# NOTE(review): elided listing — try headers, else branches, `tbl_dict`/
# `tbl_lst` initializations, the history-header loop header and several
# continuation lines are not visible in this view.
346 def table_performance_comparison(table, input_data):
347 """Generate the table(s) with algorithm: table_performance_comparison
348 specified in the specification file.
350 :param table: Table to generate.
351 :param input_data: Data to process.
352 :type table: pandas.Series
353 :type input_data: InputData
356 logging.info(" Generating the table {0} ...".
357 format(table.get("title", "")))
360 data = input_data.filter_data(table, continue_on_error=True)
362 # Prepare the header of the tables
364 header = ["Test case", ]
# Optional extra comparison columns, one Throughput/Stdev pair per entry.
366 history = table.get("history", None)
370 ["{0} Throughput [Mpps]".format(item["title"]),
371 "{0} Stdev [Mpps]".format(item["title"])])
373 ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
374 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
375 "{0} Throughput [Mpps]".format(table["compare"]["title"]),
376 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
378 header_str = ",".join(header) + "\n"
379 except (AttributeError, KeyError) as err:
380 logging.error("The model is invalid, missing parameter: {0}".
384 # Prepare data to the table:
# Pass 1: seed tbl_dict with reference-set throughput samples per test.
386 for job, builds in table["reference"]["data"].items():
388 for tst_name, tst_data in data[job][str(build)].iteritems():
389 if tbl_dict.get(tst_name, None) is None:
# Display name: first token of the parent suite + shortened test name.
390 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
391 "-".join(tst_data["name"].
393 tbl_dict[tst_name] = {"name": name,
397 tbl_dict[tst_name]["ref-data"].\
398 append(tst_data["throughput"]["value"])
400 pass # No data in output.xml for this test
# Pass 2: append compare-set samples to tests already seen in pass 1.
402 for job, builds in table["compare"]["data"].items():
404 for tst_name, tst_data in data[job][str(build)].iteritems():
406 tbl_dict[tst_name]["cmp-data"].\
407 append(tst_data["throughput"]["value"])
# Tests with unusable compare data are dropped (handler header elided).
411 tbl_dict.pop(tst_name, None)
# Pass 3 (optional): accumulate per-title history samples in an OrderedDict
# so history columns keep their spec order.
414 for job, builds in item["data"].items():
416 for tst_name, tst_data in data[job][str(build)].iteritems():
417 if tbl_dict.get(tst_name, None) is None:
419 if tbl_dict[tst_name].get("history", None) is None:
420 tbl_dict[tst_name]["history"] = OrderedDict()
421 if tbl_dict[tst_name]["history"].get(item["title"],
423 tbl_dict[tst_name]["history"][item["title"]] = \
426 tbl_dict[tst_name]["history"][item["title"]].\
427 append(tst_data["throughput"]["value"])
428 except (TypeError, KeyError):
# Build rows: [name, (hist mean, hist stdev)*, ref mean, ref stdev,
# cmp mean, cmp stdev, rel change]; values scaled to Mpps (/ 1e6).
432 for tst_name in tbl_dict.keys():
433 item = [tbl_dict[tst_name]["name"], ]
435 for hist_data in tbl_dict[tst_name]["history"].values():
437 data_t = remove_outliers(
438 hist_data, outlier_const=table["outlier-const"])
440 item.append(round(mean(data_t) / 1000000, 2))
441 item.append(round(stdev(data_t) / 1000000, 2))
443 item.extend([None, None])
445 item.extend([None, None])
446 if tbl_dict[tst_name]["ref-data"]:
447 data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
448 outlier_const=table["outlier-const"])
449 # TODO: Specify window size.
451 item.append(round(mean(data_t) / 1000000, 2))
452 item.append(round(stdev(data_t) / 1000000, 2))
454 item.extend([None, None])
456 item.extend([None, None])
457 if tbl_dict[tst_name]["cmp-data"]:
458 data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
459 outlier_const=table["outlier-const"])
460 # TODO: Specify window size.
462 item.append(round(mean(data_t) / 1000000, 2))
463 item.append(round(stdev(data_t) / 1000000, 2))
465 item.extend([None, None])
467 item.extend([None, None])
# item[-5]/item[-3] are the ref/cmp means; guard against None and
# division by zero before computing the relative change.
468 if item[-5] is not None and item[-3] is not None and item[-5] != 0:
469 item.append(int(relative_change(float(item[-5]), float(item[-3]))))
# Rows missing any column are silently dropped by this length check.
470 if len(item) == len(header):
473 # Sort the table according to the relative change
474 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
478 tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
479 table["output-file-ext"]),
480 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
481 table["output-file-ext"]),
482 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
483 table["output-file-ext"]),
484 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
485 table["output-file-ext"]),
486 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
487 table["output-file-ext"]),
488 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
489 table["output-file-ext"])
491 for file_name in tbl_names:
492 logging.info(" Writing file: '{0}'".format(file_name))
493 with open(file_name, "w") as file_handler:
494 file_handler.write(header_str)
# Select rows whose test name matches this file's ndr/pdr kind and core
# count (tokens -3 and -2 of the file name), then strip the core suffix.
496 if (file_name.split("-")[-3] in test[0] and # NDR vs PDR
497 file_name.split("-")[-2] in test[0]): # cores
498 test[0] = "-".join(test[0].split("-")[:-1])
499 file_handler.write(",".join([str(item) for item in test]) +
# Render each CSV as an aligned ASCII table via prettytable.
503 tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
504 "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
505 "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
506 "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
507 "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
508 "{0}-pdr-4t4c-full.txt".format(table["output-file"])
511 for i, txt_name in enumerate(tbl_names_txt):
513 logging.info(" Writing file: '{0}'".format(txt_name))
514 with open(tbl_names[i], 'rb') as csv_file:
515 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
516 for row in csv_content:
# First CSV row seeds the PrettyTable header.
517 if txt_table is None:
518 txt_table = prettytable.PrettyTable(row)
520 txt_table.add_row(row)
521 txt_table.align["Test case"] = "l"
522 with open(txt_name, "w") as txt_file:
523 txt_file.write(str(txt_table))
525 # Selected tests in csv:
# Excerpts: first/last nr-of-tests-shown rows of the sorted 1t1c files
# (rows are sorted by relative change, so head = top, reversed tail =
# bottom).
526 input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
527 table["output-file-ext"])
528 with open(input_file, "r") as in_file:
533 output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
534 table["output-file-ext"])
535 logging.info(" Writing file: '{0}'".format(output_file))
536 with open(output_file, "w") as out_file:
537 out_file.write(header_str)
538 for i, line in enumerate(lines[1:]):
539 if i == table["nr-of-tests-shown"]:
543 output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
544 table["output-file-ext"])
545 logging.info(" Writing file: '{0}'".format(output_file))
546 with open(output_file, "w") as out_file:
547 out_file.write(header_str)
548 for i, line in enumerate(lines[-1:0:-1]):
549 if i == table["nr-of-tests-shown"]:
553 input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
554 table["output-file-ext"])
555 with open(input_file, "r") as in_file:
560 output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
561 table["output-file-ext"])
562 logging.info(" Writing file: '{0}'".format(output_file))
563 with open(output_file, "w") as out_file:
564 out_file.write(header_str)
565 for i, line in enumerate(lines[1:]):
566 if i == table["nr-of-tests-shown"]:
570 output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
571 table["output-file-ext"])
572 logging.info(" Writing file: '{0}'".format(output_file))
573 with open(output_file, "w") as out_file:
574 out_file.write(header_str)
575 for i, line in enumerate(lines[-1:0:-1]):
576 if i == table["nr-of-tests-shown"]:
# MRR variant of table_performance_comparison: same reference-vs-compare
# mean/stdev/relative-change pipeline, but samples come from
# tst_data["result"]["throughput"] (not ["throughput"]["value"]), there are no
# history columns, and output is per-core-count only (no ndr/pdr split, no
# top/bottom excerpt files).
# NOTE(review): elided listing — try headers, else branches and `tbl_dict`/
# `tbl_lst`/`txt_table` initializations are not visible in this view.
581 def table_performance_comparison_mrr(table, input_data):
582 """Generate the table(s) with algorithm: table_performance_comparison_mrr
583 specified in the specification file.
585 :param table: Table to generate.
586 :param input_data: Data to process.
587 :type table: pandas.Series
588 :type input_data: InputData
591 logging.info(" Generating the table {0} ...".
592 format(table.get("title", "")))
595 data = input_data.filter_data(table, continue_on_error=True)
597 # Prepare the header of the tables
599 header = ["Test case",
600 "{0} Throughput [Mpps]".format(table["reference"]["title"]),
601 "{0} stdev [Mpps]".format(table["reference"]["title"]),
602 "{0} Throughput [Mpps]".format(table["compare"]["title"]),
603 "{0} stdev [Mpps]".format(table["compare"]["title"]),
605 header_str = ",".join(header) + "\n"
606 except (AttributeError, KeyError) as err:
607 logging.error("The model is invalid, missing parameter: {0}".
611 # Prepare data to the table:
# Pass 1: seed tbl_dict with reference-set MRR samples per test.
613 for job, builds in table["reference"]["data"].items():
615 for tst_name, tst_data in data[job][str(build)].iteritems():
616 if tbl_dict.get(tst_name, None) is None:
617 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
618 "-".join(tst_data["name"].
620 tbl_dict[tst_name] = {"name": name,
624 tbl_dict[tst_name]["ref-data"].\
625 append(tst_data["result"]["throughput"])
627 pass # No data in output.xml for this test
# Pass 2: compare-set samples; unusable tests are dropped below.
629 for job, builds in table["compare"]["data"].items():
631 for tst_name, tst_data in data[job][str(build)].iteritems():
633 tbl_dict[tst_name]["cmp-data"].\
634 append(tst_data["result"]["throughput"])
638 tbl_dict.pop(tst_name, None)
# Rows: [name, ref mean, ref stdev, cmp mean, cmp stdev, rel change],
# throughput scaled to Mpps.
641 for tst_name in tbl_dict.keys():
642 item = [tbl_dict[tst_name]["name"], ]
643 if tbl_dict[tst_name]["ref-data"]:
644 data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
645 outlier_const=table["outlier-const"])
646 # TODO: Specify window size.
648 item.append(round(mean(data_t) / 1000000, 2))
649 item.append(round(stdev(data_t) / 1000000, 2))
651 item.extend([None, None])
653 item.extend([None, None])
654 if tbl_dict[tst_name]["cmp-data"]:
655 data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
656 outlier_const=table["outlier-const"])
657 # TODO: Specify window size.
659 item.append(round(mean(data_t) / 1000000, 2))
660 item.append(round(stdev(data_t) / 1000000, 2))
662 item.extend([None, None])
664 item.extend([None, None])
# item[1]/item[3] are ref/cmp means; guard None and zero divisor.
665 if item[1] is not None and item[3] is not None and item[1] != 0:
666 item.append(int(relative_change(float(item[1]), float(item[3]))))
670 # Sort the table according to the relative change
671 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
675 tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
676 table["output-file-ext"]),
677 "{0}-2t2c-full{1}".format(table["output-file"],
678 table["output-file-ext"]),
679 "{0}-4t4c-full{1}".format(table["output-file"],
680 table["output-file-ext"])
682 for file_name in tbl_names:
683 logging.info(" Writing file: '{0}'".format(file_name))
684 with open(file_name, "w") as file_handler:
685 file_handler.write(header_str)
# Token -2 of the file name is the core count (e.g. "1t1c"); rows are
# routed by core count and the core suffix is stripped from the name.
687 if file_name.split("-")[-2] in test[0]: # cores
688 test[0] = "-".join(test[0].split("-")[:-1])
689 file_handler.write(",".join([str(item) for item in test]) +
# Pretty-printed .txt mirror of each CSV.
693 tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
694 "{0}-2t2c-full.txt".format(table["output-file"]),
695 "{0}-4t4c-full.txt".format(table["output-file"])
698 for i, txt_name in enumerate(tbl_names_txt):
700 logging.info(" Writing file: '{0}'".format(txt_name))
701 with open(tbl_names[i], 'rb') as csv_file:
702 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
703 for row in csv_content:
704 if txt_table is None:
705 txt_table = prettytable.PrettyTable(row)
707 txt_table.add_row(row)
708 txt_table.align["Test case"] = "l"
709 with open(txt_name, "w") as txt_file:
710 txt_file.write(str(txt_table))
# Trending dashboard: per test, builds a pandas Series of MRR throughput over
# builds, computes rolling median/stdev over a short window, classifies each
# sample (outlier/regression/progression/normal) against median +/- 2*stdev,
# and reports short-term change (last median vs window-start median) and
# long-term change (last median vs max median over the long window).  Output is
# one CSV plus a prettytable .txt mirror.
# NOTE(review): elided listing — parts of the header list, split_outliers
# kwargs, several guard/else lines and the tbl_lst.append header are not
# visible in this view.
713 def table_performance_trending_dashboard(table, input_data):
714 """Generate the table(s) with algorithm: table_performance_comparison
715 specified in the specification file.
717 :param table: Table to generate.
718 :param input_data: Data to process.
719 :type table: pandas.Series
720 :type input_data: InputData
723 logging.info(" Generating the table {0} ...".
724 format(table.get("title", "")))
727 data = input_data.filter_data(table, continue_on_error=True)
729 # Prepare the header of the tables
730 header = ["Test Case",
732 "Short-Term Change [%]",
733 "Long-Term Change [%]",
738 header_str = ",".join(header) + "\n"
740 # Prepare data to the table:
# Collect throughput per build keyed by build number, per test.
742 for job, builds in table["data"].items():
744 for tst_name, tst_data in data[job][str(build)].iteritems():
745 if tbl_dict.get(tst_name, None) is None:
746 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
747 "-".join(tst_data["name"].
749 tbl_dict[tst_name] = {"name": name,
752 tbl_dict[tst_name]["data"][str(build)] = \
753 tst_data["result"]["throughput"]
754 except (TypeError, KeyError):
755 pass # No data in output.xml for this test
# Need at least 3 samples to trend.
758 for tst_name in tbl_dict.keys():
759 if len(tbl_dict[tst_name]["data"]) > 2:
761 pd_data = pd.Series(tbl_dict[tst_name]["data"])
762 last_key = pd_data.keys()[-1]
# Short window, capped by available samples; key_14 is the build at
# the window start (the short-term comparison point).
763 win_size = min(pd_data.size, table["window"])
764 win_first_idx = pd_data.size - win_size
765 key_14 = pd_data.keys()[win_first_idx]
766 long_win_size = min(pd_data.size, table["long-trend-window"])
768 data_t, _ = split_outliers(pd_data, outlier_const=1.5,
# Rolling stats need >= 2 samples per window position.
771 median_t = data_t.rolling(window=win_size, min_periods=2).median()
772 stdev_t = data_t.rolling(window=win_size, min_periods=2).std()
773 median_first_idx = pd_data.size - long_win_size
775 max_median = max([x for x in median_t.values[median_first_idx:]
780 last_median_t = median_t[last_key]
784 median_t_14 = median_t[key_14]
789 name = tbl_dict[tst_name]["name"]
# Verbose per-test debug dump of the intermediate series.
791 logging.info("{}".format(name))
792 logging.info("pd_data : {}".format(pd_data))
793 logging.info("data_t : {}".format(data_t))
794 logging.info("median_t : {}".format(median_t))
795 logging.info("last_median_t : {}".format(last_median_t))
796 logging.info("median_t_14 : {}".format(median_t_14))
797 logging.info("max_median : {}".format(max_median))
799 # Classification list:
800 classification_lst = list()
801 for build_nr, value in pd_data.iteritems():
# NaN in any intermediate marks the sample as an outlier.
803 if isnan(data_t[build_nr]) \
804 or isnan(median_t[build_nr]) \
805 or isnan(stdev_t[build_nr]) \
807 classification_lst.append("outlier")
808 elif value < (median_t[build_nr] - 2 * stdev_t[build_nr]):
809 classification_lst.append("regression")
810 elif value > (median_t[build_nr] + 2 * stdev_t[build_nr]):
811 classification_lst.append("progression")
813 classification_lst.append("normal")
815 if isnan(last_median_t) or isnan(median_t_14) or median_t_14 == 0.0:
816 rel_change_last = nan
818 rel_change_last = round(
819 ((last_median_t - median_t_14) / median_t_14) * 100, 2)
821 if isnan(max_median) or isnan(last_median_t) or max_median == 0.0:
822 rel_change_long = nan
824 rel_change_long = round(
825 ((last_median_t - max_median) / max_median) * 100, 2)
827 logging.info("rel_change_last : {}".format(rel_change_last))
828 logging.info("rel_change_long : {}".format(rel_change_long))
# Row: name, last median (Mpps), short/long-term change, and counts of
# regressions/progressions/outliers inside the short window.
832 '-' if isnan(last_median_t) else
833 round(last_median_t / 1000000, 2),
834 '-' if isnan(rel_change_last) else rel_change_last,
835 '-' if isnan(rel_change_long) else rel_change_long,
836 classification_lst[win_first_idx:].count("regression"),
837 classification_lst[win_first_idx:].count("progression"),
838 classification_lst[win_first_idx:].count("outlier")])
840 tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-level ordering: by regressions desc, then progressions
# desc, then outliers desc (columns 4/5/6), then short-term change.
843 for nrr in range(table["window"], -1, -1):
844 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
845 for nrp in range(table["window"], -1, -1):
846 tbl_pro = [item for item in tbl_reg if item[5] == nrp]
847 for nro in range(table["window"], -1, -1):
848 tbl_out = [item for item in tbl_pro if item[6] == nro]
849 tbl_out.sort(key=lambda rel: rel[2])
850 tbl_sorted.extend(tbl_out)
852 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
854 logging.info(" Writing file: '{0}'".format(file_name))
855 with open(file_name, "w") as file_handler:
856 file_handler.write(header_str)
857 for test in tbl_sorted:
858 file_handler.write(",".join([str(item) for item in test]) + '\n')
860 txt_file_name = "{0}.txt".format(table["output-file"])
862 logging.info(" Writing file: '{0}'".format(txt_file_name))
863 with open(file_name, 'rb') as csv_file:
864 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
865 for row in csv_content:
866 if txt_table is None:
867 txt_table = prettytable.PrettyTable(row)
869 txt_table.add_row(row)
870 txt_table.align["Test case"] = "l"
871 with open(txt_file_name, "w") as txt_file:
872 txt_file.write(str(txt_table))
# Renders the dashboard CSV (table["input-file"]) as an HTML <table> via
# ElementTree: header row on blue background, zebra-striped data rows, and the
# first cell of each row turned into a hyperlink into the trending docs, with
# the target page/anchor/feature suffix chosen by substring-matching the test
# name (memif/vhost/ipsec/tunnels/ip4/ip6/l2, NIC model, frame size, ...).
# NOTE(review): elided listing — `try:` headers, the KeyError handler around
# the input file, `url` base initialization, `anchor`/`feature` defaults and
# several elif branches are not visible in this view.
875 def table_performance_trending_dashboard_html(table, input_data):
876 """Generate the table(s) with algorithm:
877 table_performance_trending_dashboard_html specified in the specification
880 :param table: Table to generate.
881 :param input_data: Data to process.
882 :type table: pandas.Series
883 :type input_data: InputData
886 logging.info(" Generating the table {0} ...".
887 format(table.get("title", "")))
890 with open(table["input-file"], 'rb') as csv_file:
891 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
892 csv_lst = [item for item in csv_content]
894 logging.warning("The input file is not defined.")
896 except csv.Error as err:
897 logging.warning("Not possible to process the file '{0}'.\n{1}".
898 format(table["input-file"], err))
902 dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row from the first CSV line; first column left-aligned.
905 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
906 for idx, item in enumerate(csv_lst[0]):
907 alignment = "left" if idx == 0 else "center"
908 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Data rows with alternating background color.
912 for r_idx, row in enumerate(csv_lst[1:]):
913 background = "#D4E4F7" if r_idx % 2 else "white"
914 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
917 for c_idx, item in enumerate(row):
918 alignment = "left" if c_idx == 0 else "center"
919 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# Map the test name (first column) to its trending-docs page.
927 file_name = "container_memif.html"
929 elif "vhost" in item:
930 if "l2xcbase" in item or "l2bdbasemaclrn" in item:
931 file_name = "vm_vhost_l2.html"
932 elif "ip4base" in item:
933 file_name = "vm_vhost_ip4.html"
935 elif "ipsec" in item:
936 file_name = "ipsec.html"
938 elif "ethip4lispip" in item or "ethip4vxlan" in item:
939 file_name = "ip4_tunnels.html"
941 elif "ip4base" in item or "ip4scale" in item:
942 file_name = "ip4.html"
943 if "iacl" in item or "snat" in item or "cop" in item:
944 feature = "-features"
946 elif "ip6base" in item or "ip6scale" in item:
947 file_name = "ip6.html"
949 elif "l2xcbase" in item or "l2xcscale" in item \
950 or "l2bdbasemaclrn" in item or "l2bdscale" in item \
951 or "l2dbbasemaclrn" in item or "l2dbscale" in item:
952 file_name = "l2.html"
954 feature = "-features"
# NIC / frame-size substrings select the in-page anchor (branches elided).
960 elif "xl710" in item:
969 elif "9000b" in item:
981 url = url + file_name + anchor + feature
983 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Emit as an rST ".. raw:: html" directive (note the tab indent expected
# by the literal "\n\n\t" prefix).
990 with open(table["output-file"], 'w') as html_file:
991 logging.info(" Writing file: '{0}'".
992 format(table["output-file"]))
993 html_file.write(".. raw:: html\n\n\t")
994 html_file.write(ET.tostring(dashboard))
995 html_file.write("\n\t<p><br><br></p>\n")
997 logging.warning("The output file is not defined.")