1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
23 from string import replace
24 from collections import OrderedDict
25 from numpy import nan, isnan
26 from xml.etree import ElementTree as ET
28 from errors import PresentationError
29 from utils import mean, stdev, relative_change, remove_outliers,\
30 split_outliers, classify_anomalies
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table item in the specification names the generator function to run
    (its "algorithm" key); that function is looked up by name and called with
    the table specification and the input data.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        try:
            # NOTE: eval() resolves and calls the algorithm named in the
            # specification file. The specification is trusted input, but
            # any name it contains is executed here - do not feed this
            # function untrusted specifications.
            eval(table["algorithm"])(table, data)
        except NameError as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # NOTE(review): this listing is gapped -- several lines (list
    # initializations such as 'header'/'table_lst'/'row_lst', try/except
    # statements, line continuations) are missing from this view; the
    # indentation below is best-effort and must be confirmed against the
    # full source.

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    for column in table["columns"]:
        # Each title is CSV-quoted; embedded quotes are doubled.
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification: a single job/build pair is taken from the spec.
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    suites = input_data.suites(job, build)
    logging.error("    No data available. The table will not be generated.")

    for suite_longname, suite in suites.iteritems():
        # One output csv file is produced per suite.
        suite_name = suite["name"]
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                for column in table["columns"]:
                    # The part of column["data"] after the first space is the
                    # key into the per-test data.
                    col_data = str(data[job][build][test][column["data"].
                                   split(" ")[1]]).replace('"', '""')
                    if column["data"].split(" ")[1] in ("vat-history",
                    # Multi-line fields are wrapped in |prein| / |preout|
                    # markers for downstream rst/html processing.
                    col_data = replace(col_data, " |br| ", "",
                    col_data = " |prein| {0} |preout| ".\
                        format(col_data[:-5])
                    row_lst.append('"{0}"'.format(col_data))
                    # Fallback when the requested key is missing in the data.
                    row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info("      Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in table_lst:
                file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # NOTE(review): this listing is gapped -- list initializations
    # ('header', 'table_lst', 'row_lst'), try/except statements and some
    # line continuations are missing from this view; indentation below is
    # best-effort.

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data: filter it, merge the results of all builds into
    # one data set and sort it.
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.iteritems():
        # One output csv file is produced per suite.
        suite_name = suite["name"]
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                for column in table["columns"]:
                    # The part of column["data"] after the first space is the
                    # key into the per-test data.
                    col_data = str(data[test][column["data"].
                                   split(" ")[1]]).replace('"', '""')
                    if column["data"].split(" ")[1] in ("vat-history",
                    col_data = replace(col_data, " |br| ", "",
                    col_data = " |prein| {0} |preout| ".\
                        format(col_data[:-5])
                    row_lst.append('"{0}"'.format(col_data))
                    # Fallback when the requested key is missing in the data.
                    row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info("      Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in table_lst:
                file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
def table_performance_improvements(table, input_data):
    """Generate the table(s) with algorithm: table_performance_improvements
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # NOTE(review): this listing is gapped -- several control-flow lines
    # (if/else/try, loop headers, list initializations, 'return'
    # statements) are missing from this view; indentation below is
    # best-effort.

    def _write_line_to_file(file_handler, data):
        """Write a line to the .csv file.

        :param file_handler: File handler for the csv file. It must be open
            for writing.
        :param data: Item to be written to the file.
        :type file_handler: BinaryIO
        :type data: list
        """

            # Each item is a dict with a "data" key; format it by type.
            if isinstance(item["data"], str):
                # Remove -?drdisc from the end
                if item["data"].endswith("drdisc"):
                    item["data"] = item["data"][:-8]
                line_lst.append(item["data"])
            elif isinstance(item["data"], float):
                # Floats are written with one decimal place.
                line_lst.append("{:.1f}".format(item["data"]))
            elif item["data"] is None:
        file_handler.write(",".join(line_lst) + "\n")

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the template
    file_name = table.get("template", None)
    try:
        tmpl = _read_csv_template(file_name)
    except PresentationError:
        logging.error("  The template '{0}' does not exist. Skipping the "
                      "table.".format(file_name))
    # Taken when no "template" key is present in the table specification:
    logging.error("The template is not defined. Skipping the table.")

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    for column in table["columns"]:
        header.append(column["title"])

    # Generate the data for the table according to the model in the table
    # specification: each column carries a command ("template", "data",
    # "operation") and its arguments.
    for tmpl_item in tmpl:
        for column in table["columns"]:
            cmd = column["data"].split(" ")[0]
            args = column["data"].split(" ")[1:]
            if cmd == "template":
                # Value taken directly from the template file; falls back to
                # the raw string when it is not a number.
                    val = float(tmpl_item[int(args[0])])
                    val = tmpl_item[int(args[0])]
                tbl_item.append({"data": val})
                # "data" command: collect throughput samples over builds.
                for build in data[job]:
                        data_lst.append(float(build[tmpl_item[0]]
                                              ["throughput"]["value"]))
                    except (KeyError, TypeError):
                    # Aggregate (eval of the spec-named operation, e.g.
                    # "mean") scaled to Mpps.
                    tbl_item.append({"data": (eval(operation)(data_lst)) /
                    tbl_item.append({"data": None})
            elif cmd == "operation":
                # Apply a binary operation (named in the spec) to two
                # previously computed columns.
                    nr1 = float(tbl_item[int(args[1])]["data"])
                    nr2 = float(tbl_item[int(args[2])]["data"])
                        tbl_item.append({"data": eval(operation)(nr1, nr2)})
                        tbl_item.append({"data": None})
                except (IndexError, ValueError, TypeError):
                    logging.error("No data for {0}".format(tbl_item[0]["data"]))
                    tbl_item.append({"data": None})
            # Unknown command -> abort this table:
            logging.error("Not supported command {0}. Skipping the table.".
        tbl_lst.append(tbl_item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)

    # Create the tables and write them to the files; rows are routed into
    # the top/low files below by NDR/PDR and a 10% relative-change threshold.
        "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])

    for file_name in file_names:
        logging.info("    Writing the file '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
                # Round floats for the threshold comparison; keep other
                # values as-is.
                if isinstance(item[-1]["data"], float):
                    rel_change = round(item[-1]["data"], 1)
                    rel_change = item[-1]["data"]
                if "ndr_top" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_top" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "ndr_low" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_low" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)

    logging.info("  Done.")
334 def _read_csv_template(file_name):
335 """Read the template from a .csv file.
337 :param file_name: Name / full path / relative path of the file to read.
339 :returns: Data from the template as list (lines) of lists (items on line).
341 :raises: PresentationError if it is not possible to read the file.
345 with open(file_name, 'r') as csv_file:
347 for line in csv_file:
348 tmpl_data.append(line[:-1].split(","))
350 except IOError as err:
351 raise PresentationError(str(err), level="ERROR")
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # NOTE(review): this listing is gapped -- many control-flow lines
    # (try/except/else/if, loop headers such as 'for build in builds:',
    # list/dict initializations, 'return'/'break'/'continue') are missing
    # from this view; indentation below is best-effort.

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables; optional "history" columns precede
    # the reference/compare columns.
        header = ["Test case", ]

        history = table.get("history", None)
                ["{0} Throughput [Mpps]".format(item["title"]),
                 "{0} Stdev [Mpps]".format(item["title"])])
            ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} Throughput [Mpps]".format(table["compare"]["title"]),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".

    # Prepare data to the table:
    # Reference builds: collect per-test throughput samples.
    for job, builds in table["reference"]["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    # Display name: first token of the parent suite plus the
                    # test name without its first token.
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                    tbl_dict[tst_name] = {"name": name,
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["throughput"]["value"])
                    pass  # No data in output.xml for this test

    # Compare builds: append samples; tests with no usable compare data are
    # dropped from the table.
    for job, builds in table["compare"]["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["throughput"]["value"])
                    tbl_dict.pop(tst_name, None)

        # Optional history columns, one OrderedDict entry per history item.
        for job, builds in item["data"].items():
                for tst_name, tst_data in data[job][str(build)].iteritems():
                    if tbl_dict.get(tst_name, None) is None:
                    if tbl_dict[tst_name].get("history", None) is None:
                        tbl_dict[tst_name]["history"] = OrderedDict()
                    if tbl_dict[tst_name]["history"].get(item["title"],
                        tbl_dict[tst_name]["history"][item["title"]] = \
                        tbl_dict[tst_name]["history"][item["title"]].\
                            append(tst_data["throughput"]["value"])
                    except (TypeError, KeyError):

    # Build the table rows: mean and stdev (in Mpps) per column, then the
    # relative change between reference and compare.
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if tbl_dict[tst_name].get("history", None) is not None:
            for hist_data in tbl_dict[tst_name]["history"].values():
                    data_t = remove_outliers(
                        hist_data, outlier_const=table["outlier-const"])
                    item.append(round(mean(data_t) / 1000000, 2))
                    item.append(round(stdev(data_t) / 1000000, 2))
                    item.extend([None, None])
                item.extend([None, None])
            item.extend([None, None])
        if tbl_dict[tst_name]["ref-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
            item.extend([None, None])
        if tbl_dict[tst_name]["cmp-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
            item.extend([None, None])
        # item[-4] is the reference mean, item[-2] is the compare mean.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate tables: one "full" csv per NDR/PDR and core count.
    tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])

    for file_name in tbl_names:
        logging.info("      Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
                # Select rows matching this file's NDR/PDR and core count,
                # then strip the trailing core token from the test name.
                if (file_name.split("-")[-3] in test[0] and  # NDR vs PDR
                        file_name.split("-")[-2] in test[0]):  # cores
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item) for item in test]) +

    # Pretty-printed .txt versions of the csv tables.
    tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
                     "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-pdr-4t4c-full.txt".format(table["output-file"])

    for i, txt_name in enumerate(tbl_names_txt):
        logging.info("      Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                # First csv row becomes the PrettyTable header.
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                    txt_table.add_row(row)
            txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))

    # Selected tests in csv: top/bottom "nr-of-tests-shown" rows of the
    # 1t1c full tables.
    input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:

    output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:

    output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        # Iterate data lines in reverse (excluding the header line).
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:

    input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:

    output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:

    output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
def table_performance_comparison_mrr(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison_mrr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # NOTE(review): this listing is gapped -- several control-flow lines
    # (try/except/else, loop headers, dict/list initializations,
    # 'return'/'continue') are missing from this view; indentation below is
    # best-effort.

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        header = ["Test case",
                  "{0} Throughput [Mpps]".format(table["reference"]["title"]),
                  "{0} stdev [Mpps]".format(table["reference"]["title"]),
                  "{0} Throughput [Mpps]".format(table["compare"]["title"]),
                  "{0} stdev [Mpps]".format(table["compare"]["title"]),
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".

    # Prepare data to the table:
    # Reference builds: collect per-test MRR throughput samples.
    for job, builds in table["reference"]["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    # Display name: first token of the parent suite plus the
                    # test name without its first token.
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                    tbl_dict[tst_name] = {"name": name,
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["result"]["throughput"])
                    pass  # No data in output.xml for this test

    # Compare builds: append samples; tests with no usable compare data are
    # dropped from the table.
    for job, builds in table["compare"]["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["result"]["throughput"])
                    tbl_dict.pop(tst_name, None)

    # Build the table rows: mean and stdev (in Mpps) for reference and
    # compare, then the relative change.
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if tbl_dict[tst_name]["ref-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
            item.extend([None, None])
        if tbl_dict[tst_name]["cmp-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
            item.extend([None, None])
        # item[1] is the reference mean, item[3] is the compare mean.
        if item[1] is not None and item[3] is not None and item[1] != 0:
            item.append(int(relative_change(float(item[1]), float(item[3]))))

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate tables: one "full" csv per core count.
    tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-2t2c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-4t4c-full{1}".format(table["output-file"],
                                           table["output-file-ext"])

    for file_name in tbl_names:
        logging.info("      Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
                # Select rows matching this file's core count and strip the
                # trailing core token from the test name.
                if file_name.split("-")[-2] in test[0]:  # cores
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item) for item in test]) +

    # Pretty-printed .txt versions of the csv tables.
    tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
                     "{0}-2t2c-full.txt".format(table["output-file"]),
                     "{0}-4t4c-full.txt".format(table["output-file"])

    for i, txt_name in enumerate(tbl_names_txt):
        logging.info("      Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                # First csv row becomes the PrettyTable header.
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                    txt_table.add_row(row)
            txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # NOTE(review): this listing is gapped -- several lines (header items,
    # try/except statements, loop headers, dict/list initializations,
    # 'continue' statements) are missing from this view; indentation below
    # is best-effort.

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
    header_str = ",".join(header) + "\n"

    # Prepare data to the table: per-test ordered mapping of build ->
    # measured throughput.
    for job, builds in table["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Tests explicitly ignored in the specification are skipped.
                if tst_name.lower() in table["ignore-list"]:
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                    tbl_dict[tst_name] = {"name": name,
                                          "data": OrderedDict()}
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["throughput"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    # Evaluate the trend for each test with at least 3 samples.
    for tst_name in tbl_dict.keys():
        if len(tbl_dict[tst_name]["data"]) < 3:

        pd_data = pd.Series(tbl_dict[tst_name]["data"])
        data_t, _ = split_outliers(pd_data, outlier_const=1.5,
                                   window=table["window"])
        last_key = data_t.keys()[-1]
        # Short-term window: the last "window" samples.
        win_size = min(data_t.size, table["window"])
        win_first_idx = data_t.size - win_size
        key_14 = data_t.keys()[win_first_idx]
        long_win_size = min(data_t.size, table["long-trend-window"])
        # Rolling median over the short-term window is used as the trend.
        median_t = data_t.rolling(window=win_size, min_periods=2).median()
        median_first_idx = median_t.size - long_win_size
            # Long-term reference: maximum of the rolling medians before the
            # short-term window (NaNs excluded).
            [x for x in median_t.values[median_first_idx:-win_size]
            last_median_t = median_t[last_key]
            median_t_14 = median_t[key_14]

        # Short-term change: last median vs median at the window start.
        if isnan(last_median_t) or isnan(median_t_14) or median_t_14 == 0.0:
            rel_change_last = nan
            rel_change_last = round(
                ((last_median_t - median_t_14) / median_t_14) * 100, 2)

        # Long-term change: last median vs the long-term maximum median.
        if isnan(max_median) or isnan(last_median_t) or max_median == 0.0:
            rel_change_long = nan
            rel_change_long = round(
                ((last_median_t - max_median) / max_median) * 100, 2)

        # Classification list:
        classification_lst = classify_anomalies(data_t, window=14)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                # Row: name, trend [Mpps], short/long-term change [%], and
                # counts of regressions/progressions/outliers in the window.
                [tbl_dict[tst_name]["name"],
                 '-' if isnan(last_median_t) else
                 round(last_median_t / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
                 classification_lst[win_first_idx:].count("regression"),
                 classification_lst[win_first_idx:].count("progression"),
                 classification_lst[win_first_idx:].count("outlier")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Stable sort: regressions first, then progressions, then outliers
    # (each in decreasing count), ties broken by short-term change.
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_pro = [item for item in tbl_reg if item[5] == nrp]
            for nro in range(table["window"], -1, -1):
                tbl_out = [item for item in tbl_pro if item[6] == nro]
                tbl_out.sort(key=lambda rel: rel[2])
                tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("      Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    # Pretty-printed .txt version of the csv table.
    txt_file_name = "{0}.txt".format(table["output-file"])

    logging.info("      Writing file: '{0}'".format(txt_file_name))
    with open(file_name, 'rb') as csv_file:
        csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
        for row in csv_content:
            # First csv row becomes the PrettyTable header.
            if txt_table is None:
                txt_table = prettytable.PrettyTable(row)
                txt_table.add_row(row)
        txt_table.align["Test case"] = "l"
    with open(txt_file_name, "w") as txt_file:
        txt_file.write(str(txt_table))
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # NOTE(review): this listing is gapped -- several lines (try statements,
    # 'return's, elif branches, assignments such as 'th.text'/'ref.text')
    # are missing from this view; indentation below is best-effort.

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the pre-generated dashboard csv produced by
    # table_performance_trending_dashboard.
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
        logging.warning("The input file is not defined.")
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header (first csv row):
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))

    # Rows: background color pairs (alternating rows) keyed by the row's
    # classification.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "outlier": ("#e6e6e6", "#cccccc"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
            color = "progression"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # First column: turn the test name into a link to the matching
            # trending page; the target file/anchor/feature suffix is chosen
            # from substrings of the test name.
                if "lbdpdk" in item or "lbvpp" in item:
                    file_name = "link_bonding.html"

                elif "testpmd" in item or "l3fwd" in item:
                    file_name = "dpdk.html"

                elif "memif" in item:
                    file_name = "container_memif.html"

                    file_name = "srv6.html"

                elif "vhost" in item:
                    if "l2xcbase" in item or "l2bdbasemaclrn" in item:
                        file_name = "vm_vhost_l2.html"
                    elif "ip4base" in item:
                        file_name = "vm_vhost_ip4.html"

                elif "ipsec" in item:
                    file_name = "ipsec.html"

                elif "ethip4lispip" in item or "ethip4vxlan" in item:
                    file_name = "ip4_tunnels.html"

                elif "ip4base" in item or "ip4scale" in item:
                    file_name = "ip4.html"
                    if "iacl" in item or "snat" in item or "cop" in item:
                        feature = "-features"

                elif "ip6base" in item or "ip6scale" in item:
                    file_name = "ip6.html"

                elif "l2xcbase" in item or "l2xcscale" in item \
                        or "l2bdbasemaclrn" in item or "l2bdscale" in item \
                        or "l2dbbasemaclrn" in item or "l2dbscale" in item:
                    file_name = "l2.html"
                        feature = "-features"

                # NIC-specific anchor selection:
                elif "xl710" in item:

                elif "9000b" in item:

                url = url + file_name + anchor + feature
                ref = ET.SubElement(td, "a", attrib=dict(href=url))

    # Wrap the generated HTML in an rst "raw" directive and write it out.
        with open(table["output-file"], 'w') as html_file:
            logging.info("      Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
        logging.warning("The output file is not defined.")