1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
23 from string import replace
24 from collections import OrderedDict
25 from numpy import nan, isnan
26 from xml.etree import ElementTree as ET
28 from errors import PresentationError
29 from utils import mean, stdev, relative_change, classify_anomalies
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        try:
            # Look the algorithm up in module globals instead of eval()-ing
            # the specification string directly, so an arbitrary expression
            # in the specification file cannot be executed.  An unknown name
            # still raises NameError to keep the original error path.
            algorithm = globals().get(table["algorithm"], None)
            if not callable(algorithm):
                raise NameError("name '{0}' is not defined".
                                format(table["algorithm"]))
            algorithm(table, data)
        except NameError as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
    logging.info("Done.")
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this listing is elided; some lines (loop headers,
    # try/except, list initialisations, returns) are missing from this view.
    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))
    # Keep only the data selected by this table's specification.
    logging.info("  Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    # Prepare the header of the tables
    # Titles are CSV-quoted; embedded double quotes are doubled ("" escape).
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
    # Generate the data for the table according to the model in the table
    # specification.  Only the first job and its first build are used.
    # NOTE(review): dict.keys()[0] / iteritems() below are Python 2 only.
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    suites = input_data.suites(job, build)
    # Emitted when no suite data exists for the selected job/build.
    logging.error("  No data available. The table will not be generated.")
    # One output file per suite; a row is emitted for every test whose
    # "parent" matches the suite name.
    for suite_longname, suite in suites.iteritems():
        suite_name = suite["name"]
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                for column in table["columns"]:
                    # column["data"] is "<cmd> <key>"; the second token names
                    # the test-data field shown in this cell.
                    col_data = str(data[job][build][test][column["data"].
                                   split(" ")[1]]).replace('"', '""')
                    if column["data"].split(" ")[1] in ("vat-history",
                        # Multi-line fields: drop the " |br| " marker and
                        # wrap the text as a preformatted rST block.
                        col_data = replace(col_data, " |br| ", "",
                        col_data = " |prein| {0} |preout| ".\
                            format(col_data[:-5])
                    row_lst.append('"{0}"'.format(col_data))
                    # Fallback cell when the field is missing for this test.
                    row_lst.append("No data")
                table_lst.append(row_lst)
        # Write the data to file
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info("  Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in table_lst:
                file_handler.write(",".join(item) + "\n")
    logging.info("  Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this listing is elided; some lines (loop headers,
    # try/except, list initialisations) are missing from this view.
    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))
    # Unlike table_details, the data of all selected jobs/builds is merged
    # into a single data set before the tables are built.
    logging.info("  Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)
    # Suites are filtered and merged the same way as the test data.
    logging.info("  Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)
    # Prepare the header of the tables
    # Titles are CSV-quoted; embedded double quotes are doubled ("" escape).
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
    # One output file per suite; a row per test whose "parent" matches.
    for _, suite in suites.iteritems():
        suite_name = suite["name"]
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                for column in table["columns"]:
                    # Second token of column["data"] names the field shown.
                    col_data = str(data[test][column["data"].
                                   split(" ")[1]]).replace('"', '""')
                    if column["data"].split(" ")[1] in ("vat-history",
                        # Strip " |br| " and present as preformatted block.
                        col_data = replace(col_data, " |br| ", "",
                        col_data = " |prein| {0} |preout| ".\
                            format(col_data[:-5])
                    row_lst.append('"{0}"'.format(col_data))
                    # Fallback cell when the field is missing for this test.
                    row_lst.append("No data")
                table_lst.append(row_lst)
        # Write the data to file
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info("  Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in table_lst:
                file_handler.write(",".join(item) + "\n")
    logging.info("  Done.")
def table_performance_improvements(table, input_data):
    """Generate the table(s) with algorithm: table_performance_improvements
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this listing is elided; some lines (loop headers,
    # try/except, assignments, returns) are missing from this view.

    def _write_line_to_file(file_handler, data):
        """Write a line to the .csv file.
        :param file_handler: File handler for the csv file. It must be open for
        :param data: Item to be written to the file.
        :type file_handler: BinaryIO
        """
        # Each item is a dict with key "data"; formatting depends on type.
        if isinstance(item["data"], str):
            # Remove -?drdisc from the end
            if item["data"].endswith("drdisc"):
                item["data"] = item["data"][:-8]
            line_lst.append(item["data"])
        elif isinstance(item["data"], float):
            # Floats are rendered with one decimal place.
            line_lst.append("{:.1f}".format(item["data"]))
        elif item["data"] is None:
        file_handler.write(",".join(line_lst) + "\n")

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))
    # Read the template; without it this table cannot be produced.
    file_name = table.get("template", None)
            tmpl = _read_csv_template(file_name)
        except PresentationError:
            logging.error("  The template '{0}' does not exist. Skipping the "
                          "table.".format(file_name))
        logging.error("The template is not defined. Skipping the table.")
    logging.info("  Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    # Prepare the header of the tables
    for column in table["columns"]:
        header.append(column["title"])
    # Generate the data for the table according to the model in the table
    # specification.  Each column's "data" is "<cmd> <args...>" with <cmd>
    # one of "template", "data" or "operation".
    for tmpl_item in tmpl:
        for column in table["columns"]:
            cmd = column["data"].split(" ")[0]
            args = column["data"].split(" ")[1:]
            if cmd == "template":
                # Copy a cell from the template; numeric cells become float.
                    val = float(tmpl_item[int(args[0])])
                    val = tmpl_item[int(args[0])]
                tbl_item.append({"data": val})
                # "data": gather throughput samples across builds, then
                # reduce them with the operation named in the specification.
                for build in data[job]:
                        data_lst.append(float(build[tmpl_item[0]]
                                              ["throughput"]["value"]))
                    except (KeyError, TypeError):
                    # NOTE(review): eval() runs the operation name taken
                    # from the specification file; divisor line is elided.
                    tbl_item.append({"data": (eval(operation)(data_lst)) /
                    tbl_item.append({"data": None})
            elif cmd == "operation":
                # Binary operation applied to two previously built columns.
                    nr1 = float(tbl_item[int(args[1])]["data"])
                    nr2 = float(tbl_item[int(args[2])]["data"])
                        tbl_item.append({"data": eval(operation)(nr1, nr2)})
                        tbl_item.append({"data": None})
                except (IndexError, ValueError, TypeError):
                    logging.error("No data for {0}".format(tbl_item[0]["data"]))
                    tbl_item.append({"data": None})
                logging.error("Not supported command {0}. Skipping the table.".
        tbl_lst.append(tbl_item)
    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)
    # Create the tables and write them to the files
    # Four files: top/low improvements for NDR and for PDR.
        "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])
    for file_name in file_names:
        logging.info("  Writing the file '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            # Route each row by its NDR/PDR tag and relative change:
            # "top" files take rows with change >= 10.0, "low" files the rest.
                if isinstance(item[-1]["data"], float):
                    rel_change = round(item[-1]["data"], 1)
                    rel_change = item[-1]["data"]
                if "ndr_top" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_top" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "ndr_low" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_low" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)
    logging.info("  Done.")
333 def _read_csv_template(file_name):
334 """Read the template from a .csv file.
336 :param file_name: Name / full path / relative path of the file to read.
338 :returns: Data from the template as list (lines) of lists (items on line).
340 :raises: PresentationError if it is not possible to read the file.
344 with open(file_name, 'r') as csv_file:
346 for line in csv_file:
347 tmpl_data.append(line[:-1].split(","))
349 except IOError as err:
350 raise PresentationError(str(err), level="ERROR")
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this listing is elided; some lines (loop headers,
    # try/except, list initialisations, returns) are missing from this view.
    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))
    # Keep only the data selected by this table's specification.
    logging.info("  Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)
    # Prepare the header of the tables
        header = ["Test case", ]
        # Optional historic columns precede the reference/compare columns.
        history = table.get("history", None)
                    ["{0} Throughput [Mpps]".format(item["title"]),
                     "{0} Stdev [Mpps]".format(item["title"])])
            ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} Throughput [Mpps]".format(table["compare"]["title"]),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
    # Prepare data to the table:
    # Collect reference throughput samples per test name.
    for job, builds in table["reference"]["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    # Display name: NIC prefix of the parent + test name.
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                    tbl_dict[tst_name] = {"name": name,
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["throughput"]["value"])
                    pass  # No data in output.xml for this test
    # Collect compare throughput samples; tests without compare data are
    # dropped from the table.
    for job, builds in table["compare"]["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["throughput"]["value"])
                    tbl_dict.pop(tst_name, None)
    # Collect historic samples keyed by each history item's title.
            for job, builds in item["data"].items():
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        if tbl_dict.get(tst_name, None) is None:
                        if tbl_dict[tst_name].get("history", None) is None:
                            tbl_dict[tst_name]["history"] = OrderedDict()
                        if tbl_dict[tst_name]["history"].get(item["title"],
                            tbl_dict[tst_name]["history"][item["title"]] = \
                            tbl_dict[tst_name]["history"][item["title"]].\
                                append(tst_data["throughput"]["value"])
                        except (TypeError, KeyError):
    # Build one row per test: mean/stdev in Mpps, then relative change.
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if tbl_dict[tst_name].get("history", None) is not None:
            for hist_data in tbl_dict[tst_name]["history"].values():
                    item.append(round(mean(hist_data) / 1000000, 2))
                    item.append(round(stdev(hist_data) / 1000000, 2))
                    item.extend([None, None])
            item.extend([None, None])
        if tbl_dict[tst_name]["ref-data"]:
            data_t = tbl_dict[tst_name]["ref-data"]
            # TODO: Specify window size.
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
            item.extend([None, None])
        if tbl_dict[tst_name]["cmp-data"]:
            data_t = tbl_dict[tst_name]["cmp-data"]
            # TODO: Specify window size.
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
            item.extend([None, None])
        # item[-4] is the reference mean, item[-2] the compare mean; compute
        # the change only when both exist and division is defined.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):
    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
    # Six "full" csv files: NDR/PDR x 1t1c/2t2c/4t4c.
    tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    for file_name in tbl_names:
        logging.info("  Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
                # Row goes to the file matching its NDR/PDR tag and core
                # count; the core token is then stripped from the test name.
                if (file_name.split("-")[-3] in test[0] and  # NDR vs PDR
                        file_name.split("-")[-2] in test[0]):  # cores
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item) for item in test]) +
    # Convert the csv tables to plain-text tables via prettytable.
    tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
                     "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-pdr-4t4c-full.txt".format(table["output-file"])
    for i, txt_name in enumerate(tbl_names_txt):
        logging.info("  Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                if txt_table is None:
                    # First csv row becomes the prettytable header.
                    txt_table = prettytable.PrettyTable(row)
                    txt_table.add_row(row)
            txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))
    # Selected tests in csv:
    # Top/bottom "nr-of-tests-shown" rows of the 1t1c full tables.
    input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
    output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info("  Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:
    output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info("  Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        # Bottom list walks the data rows in reverse order.
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
    input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
    output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info("  Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:
    output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info("  Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
def table_performance_comparison_mrr(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison_mrr
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this listing is elided; some lines (loop headers,
    # try/except, list initialisations, returns) are missing from this view.
    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))
    # Keep only the data selected by this table's specification.
    logging.info("  Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)
    # Prepare the header of the tables
        header = ["Test case",
                  "{0} Throughput [Mpps]".format(table["reference"]["title"]),
                  "{0} stdev [Mpps]".format(table["reference"]["title"]),
                  "{0} Throughput [Mpps]".format(table["compare"]["title"]),
                  "{0} stdev [Mpps]".format(table["compare"]["title"]),
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
    # Prepare data to the table:
    # Collect reference MRR samples (result throughput) per test name.
    for job, builds in table["reference"]["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    # Display name: NIC prefix of the parent + test name.
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                    tbl_dict[tst_name] = {"name": name,
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["result"]["throughput"])
                    pass  # No data in output.xml for this test
    # Collect compare MRR samples; tests without compare data are dropped.
    for job, builds in table["compare"]["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["result"]["throughput"])
                    tbl_dict.pop(tst_name, None)
    # Build one row per test: mean/stdev in Mpps, then relative change.
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if tbl_dict[tst_name]["ref-data"]:
            data_t = tbl_dict[tst_name]["ref-data"]
            # TODO: Specify window size.
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
            item.extend([None, None])
        if tbl_dict[tst_name]["cmp-data"]:
            data_t = tbl_dict[tst_name]["cmp-data"]
            # TODO: Specify window size.
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
            item.extend([None, None])
        # item[1] is the reference mean, item[3] the compare mean.
        if item[1] is not None and item[3] is not None and item[1] != 0:
            item.append(int(relative_change(float(item[1]), float(item[3]))))
    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
    # Three "full" csv files: 1t1c/2t2c/4t4c.
    tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-2t2c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-4t4c-full{1}".format(table["output-file"],
                                           table["output-file-ext"])
    for file_name in tbl_names:
        logging.info("  Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
                # Row goes to the file matching its core count; the core
                # token is then stripped from the test name.
                if file_name.split("-")[-2] in test[0]:  # cores
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item) for item in test]) +
    # Convert the csv tables to plain-text tables via prettytable.
    tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
                     "{0}-2t2c-full.txt".format(table["output-file"]),
                     "{0}-4t4c-full.txt".format(table["output-file"])
    for i, txt_name in enumerate(tbl_names_txt):
        logging.info("  Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                if txt_table is None:
                    # First csv row becomes the prettytable header.
                    txt_table = prettytable.PrettyTable(row)
                    txt_table.add_row(row)
            txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this listing is elided; some lines (loop headers,
    # try/except, list initialisations) are missing from this view.
    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))
    # Keep only the data selected by this table's specification.
    logging.info("  Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)
    # Prepare the header of the tables
    header = ["Test Case",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
    header_str = ",".join(header) + "\n"
    # Prepare data to the table:
    # Collect per-build MRR throughput samples for every non-ignored test.
    for job, builds in table["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                    tbl_dict[tst_name] = {"name": name,
                                          "data": OrderedDict()}
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["throughput"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test
    # Evaluate trends per test; tests with fewer than 3 samples are skipped.
    for tst_name in tbl_dict.keys():
        if len(tbl_dict[tst_name]["data"]) < 3:
        pd_data = pd.Series(tbl_dict[tst_name]["data"])
        last_key = pd_data.keys()[-1]
        # Short-term window is capped by the number of available samples.
        win_size = min(pd_data.size, table["window"])
        win_first_idx = pd_data.size - win_size
        key_14 = pd_data.keys()[win_first_idx]
        long_win_size = min(pd_data.size, table["long-trend-window"])
        # Rolling median over the short-term window is the trend value.
        median_t = pd_data.rolling(window=win_size, min_periods=2).median()
        median_first_idx = median_t.size - long_win_size
            [x for x in median_t.values[median_first_idx:-win_size]
        last_median_t = median_t[last_key]
        median_t_14 = median_t[key_14]
        # Short-term change: last median vs median at the window start.
        if isnan(last_median_t) or isnan(median_t_14) or median_t_14 == 0.0:
            rel_change_last = nan
            rel_change_last = round(
                ((last_median_t - median_t_14) / median_t_14) * 100, 2)
        # Long-term change: last median vs the long-window maximum median.
        if isnan(max_median) or isnan(last_median_t) or max_median == 0.0:
            rel_change_long = nan
            rel_change_long = round(
                ((last_median_t - max_median) / max_median) * 100, 2)
        # Classification list:
        classification_lst, _ = classify_anomalies(pd_data)
        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                # Row: name, last median [Mpps], short/long-term change and
                # counts of anomalies within the short-term window.
                [tbl_dict[tst_name]["name"],
                 '-' if isnan(last_median_t) else
                 round(last_median_t / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
                 classification_lst[win_first_idx:].count("regression"),
                 classification_lst[win_first_idx:].count("progression"),
                 classification_lst[win_first_idx:].count("outlier")])
    tbl_lst.sort(key=lambda rel: rel[0])
    # Stable sort by regression, then progression, then outlier counts
    # (descending), then by short-term change within each bucket.
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_pro = [item for item in tbl_reg if item[5] == nrp]
            for nro in range(table["window"], -1, -1):
                tbl_out = [item for item in tbl_pro if item[6] == nro]
                tbl_out.sort(key=lambda rel: rel[2])
                tbl_sorted.extend(tbl_out)
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info("  Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')
    # Convert the csv table to a plain-text table via prettytable.
    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("  Writing file: '{0}'".format(txt_file_name))
    with open(file_name, 'rb') as csv_file:
        csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
        for row in csv_content:
            if txt_table is None:
                # First csv row becomes the prettytable header.
                txt_table = prettytable.PrettyTable(row)
                txt_table.add_row(row)
        txt_table.align["Test case"] = "l"
    with open(txt_file_name, "w") as txt_file:
        txt_file.write(str(txt_table))
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this listing is elided; some lines (try/except,
    # conditions, returns) are missing from this view.
    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))
    # Read the csv dashboard produced by table_performance_trending_dashboard.
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
        logging.warning("The input file is not defined.")
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
    # Build the HTML table with xml.etree.ElementTree.
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
    # Table header: first csv row; first column left-aligned.
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
    # Rows: alternating background shades per classification.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "outlier": ("#e6e6e6", "#cccccc"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
            color = "progression"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
        # Columns: map the test name to the documentation page it links to.
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
                file_name = "container_memif.html"
                file_name = "srv6.html"
            elif "vhost" in item:
                if "l2xcbase" in item or "l2bdbasemaclrn" in item:
                    file_name = "vm_vhost_l2.html"
                elif "ip4base" in item:
                    file_name = "vm_vhost_ip4.html"
            elif "ipsec" in item:
                file_name = "ipsec.html"
            elif "ethip4lispip" in item or "ethip4vxlan" in item:
                file_name = "ip4_tunnels.html"
            elif "ip4base" in item or "ip4scale" in item:
                file_name = "ip4.html"
                if "iacl" in item or "snat" in item or "cop" in item:
                    feature = "-features"
            elif "ip6base" in item or "ip6scale" in item:
                file_name = "ip6.html"
            elif "l2xcbase" in item or "l2xcscale" in item \
                    or "l2bdbasemaclrn" in item or "l2bdscale" in item \
                    or "l2dbbasemaclrn" in item or "l2dbscale" in item:
                file_name = "l2.html"
                    feature = "-features"
            # Anchor selection by NIC / frame size (conditions elided).
            elif "xl710" in item:
            elif "9000b" in item:
            url = url + file_name + anchor + feature
            ref = ET.SubElement(td, "a", attrib=dict(href=url))
    # Serialize the HTML table into the rST output file.
        with open(table["output-file"], 'w') as html_file:
            logging.info("  Writing file: '{0}'".
                         format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
        logging.warning("The output file is not defined.")