1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
27 from errors import PresentationError
28 from utils import mean, stdev, relative_change, remove_outliers,\
29 split_outliers, classify_anomalies, convert_csv_to_pretty_txt
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table dict in ``spec.tables`` names its generator function in
    ``table["algorithm"]``; that function is looked up by name and called
    with (table, data).

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        try:
            # NOTE(review): eval() resolves the algorithm name in this
            # module's namespace. The specification file is trusted input
            # here, but eval on a spec-provided string is still a code
            # injection risk -- consider a dispatch dict of allowed
            # algorithms instead.
            eval(table["algorithm"])(table, data)
        except NameError as err:
            # A misspelled / unknown algorithm name must not abort the
            # generation of the remaining tables.
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
51 def table_details(table, input_data):
# NOTE(review): this is an elided, line-numbered listing -- the original
# line numbers jump (e.g. 53 -> 55, 67 -> 69), so several statements
# (list initialisations, try/except, else branches) are not shown.
52 """Generate the table(s) with algorithm: table_detailed_test_results
53 specified in the specification file.
55 :param table: Table to generate.
56 :param input_data: Data to process.
57 :type table: pandas.Series
58 :type input_data: InputData
61 logging.info("  Generating the table {0} ...".
62 format(table.get("title", "")))
# Build the working data set for this table from the raw input data.
65 logging.info("  Creating the data set for the {0} '{1}'.".
66 format(table.get("type", ""), table.get("title", "")))
67 data = input_data.filter_data(table)
69 # Prepare the header of the tables
71 for column in table["columns"]:
# Titles are quoted and embedded double quotes doubled for CSV output.
72 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
74 # Generate the data for the table according to the model in the table
# Only the first job / first build listed in table["data"] is used
# (Python 2: dict.keys() returns a list, so [0] indexing works).
76 job = table["data"].keys()[0]
77 build = str(table["data"][job][0])
79 suites = input_data.suites(job, build)
81 logging.error("  No data available. The table will not be generated.")
# One CSV file is written per suite (Python 2 iteritems).
84 for suite_longname, suite in suites.iteritems():
86 suite_name = suite["name"]
88 for test in data[job][build].keys():
89 if data[job][build][test]["parent"] in suite_name:
91 for column in table["columns"]:
# column["data"] looks like "data <field>"; the second token selects
# the field of the test record -- TODO confirm against the spec files.
93 col_data = str(data[job][build][test][column["data"].
94 split(" ")[1]]).replace('"', '""')
95 if column["data"].split(" ")[1] in ("vat-history",
# History/log fields: strip " |br| " separators and wrap the text in
# |prein| / |preout| markers for the rST post-processing.
97 col_data = replace(col_data, " |br| ", "",
99 col_data = " |prein| {0} |preout| ".\
100 format(col_data[:-5])
101 row_lst.append('"{0}"'.format(col_data))
103 row_lst.append("No data")
104 table_lst.append(row_lst)
106 # Write the data to file
108 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
109 table["output-file-ext"])
110 logging.info("      Writing file: '{}'".format(file_name))
111 with open(file_name, "w") as file_handler:
112 file_handler.write(",".join(header) + "\n")
113 for item in table_lst:
114 file_handler.write(",".join(item) + "\n")
116 logging.info("  Done.")
119 def table_merged_details(table, input_data):
# NOTE(review): elided listing -- original line numbers jump, so some
# statements (header/list initialisations, try/except, else branches)
# are missing from this view.
120 """Generate the table(s) with algorithm: table_merged_details
121 specified in the specification file.
123 :param table: Table to generate.
124 :param input_data: Data to process.
125 :type table: pandas.Series
126 :type input_data: InputData
129 logging.info("  Generating the table {0} ...".
130 format(table.get("title", "")))
# Unlike table_details, builds are merged into a single data set first.
133 logging.info("  Creating the data set for the {0} '{1}'.".
134 format(table.get("type", ""), table.get("title", "")))
135 data = input_data.filter_data(table)
136 data = input_data.merge_data(data)
137 data.sort_index(inplace=True)
139 logging.info("  Creating the data set for the {0} '{1}'.".
140 format(table.get("type", ""), table.get("title", "")))
141 suites = input_data.filter_data(table, data_set="suites")
142 suites = input_data.merge_data(suites)
144 # Prepare the header of the tables
146 for column in table["columns"]:
# Titles quoted and inner double quotes doubled for CSV.
147 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
# One CSV file per suite (Python 2 iteritems on the merged suites).
149 for _, suite in suites.iteritems():
151 suite_name = suite["name"]
153 for test in data.keys():
154 if data[test]["parent"] in suite_name:
156 for column in table["columns"]:
# Second token of column["data"] names the test-record field to emit.
158 col_data = str(data[test][column["data"].
159 split(" ")[1]]).replace('"', '""')
160 if column["data"].split(" ")[1] in ("vat-history",
# History/log fields get the |prein|/|preout| preformatted wrapping.
162 col_data = replace(col_data, " |br| ", "",
164 col_data = " |prein| {0} |preout| ".\
165 format(col_data[:-5])
166 row_lst.append('"{0}"'.format(col_data))
168 row_lst.append("No data")
169 table_lst.append(row_lst)
171 # Write the data to file
173 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
174 table["output-file-ext"])
175 logging.info("      Writing file: '{}'".format(file_name))
176 with open(file_name, "w") as file_handler:
177 file_handler.write(",".join(header) + "\n")
178 for item in table_lst:
179 file_handler.write(",".join(item) + "\n")
181 logging.info("  Done.")
184 def table_performance_improvements(table, input_data):
# NOTE(review): elided listing -- several statements (tbl_lst/line_lst
# initialisations, 'operation' assignments, else branches, returns)
# are not visible; comments below describe only what is shown.
185 """Generate the table(s) with algorithm: table_performance_improvements
186 specified in the specification file.
188 :param table: Table to generate.
189 :param input_data: Data to process.
190 :type table: pandas.Series
191 :type input_data: InputData
194 def _write_line_to_file(file_handler, data):
195 """Write a line to the .csv file.
197 :param file_handler: File handler for the csv file. It must be open for
199 :param data: Item to be written to the file.
200 :type file_handler: BinaryIO
# Each item is a {"data": value} dict; format by the value's type.
206 if isinstance(item["data"], str):
207 # Remove -?drdisc from the end
208 if item["data"].endswith("drdisc"):
209 item["data"] = item["data"][:-8]
210 line_lst.append(item["data"])
211 elif isinstance(item["data"], float):
212 line_lst.append("{:.1f}".format(item["data"]))
213 elif item["data"] is None:
215 file_handler.write(",".join(line_lst) + "\n")
217 logging.info("  Generating the table {0} ...".
218 format(table.get("title", "")))
# The table is seeded from a CSV template; without one the table is
# skipped.
221 file_name = table.get("template", None)
224 tmpl = _read_csv_template(file_name)
225 except PresentationError:
226 logging.error("  The template '{0}' does not exist. Skipping the "
227 "table.".format(file_name))
230 logging.error("The template is not defined. Skipping the table.")
234 logging.info("  Creating the data set for the {0} '{1}'.".
235 format(table.get("type", ""), table.get("title", "")))
236 data = input_data.filter_data(table)
238 # Prepare the header of the tables
240 for column in table["columns"]:
241 header.append(column["title"])
243 # Generate the data for the table according to the model in the table
246 for tmpl_item in tmpl:
248 for column in table["columns"]:
# column["data"] = "<cmd> <args...>"; cmd selects how the cell is built.
249 cmd = column["data"].split(" ")[0]
250 args = column["data"].split(" ")[1:]
251 if cmd == "template":
# Take the cell straight from the template column args[0],
# converted to float when possible (presumably; conversion
# fallback line is elided -- verify against full source).
253 val = float(tmpl_item[int(args[0])])
255 val = tmpl_item[int(args[0])]
256 tbl_item.append({"data": val})
# Elided branch: collect throughput values over builds of a job.
262 for build in data[job]:
264 data_lst.append(float(build[tmpl_item[0]]
265 ["throughput"]["value"]))
266 except (KeyError, TypeError):
# 'operation' is defined on an elided line; eval applies it
# (e.g. mean) to the collected samples.
270 tbl_item.append({"data": (eval(operation)(data_lst)) /
273 tbl_item.append({"data": None})
274 elif cmd == "operation":
# Apply a named binary operation to two previously computed cells.
277 nr1 = float(tbl_item[int(args[1])]["data"])
278 nr2 = float(tbl_item[int(args[2])]["data"])
280 tbl_item.append({"data": eval(operation)(nr1, nr2)})
282 tbl_item.append({"data": None})
283 except (IndexError, ValueError, TypeError):
284 logging.error("No data for {0}".format(tbl_item[0]["data"]))
285 tbl_item.append({"data": None})
288 logging.error("Not supported command {0}. Skipping the table.".
291 tbl_lst.append(tbl_item)
293 # Sort the table according to the relative change
# The last cell of each row holds the relative change; sort descending.
294 tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)
296 # Create the tables and write them to the files
298 "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
299 "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
300 "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
301 "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])
304 for file_name in file_names:
305 logging.info("    Writing the file '{0}'".format(file_name))
306 with open(file_name, "w") as file_handler:
307 file_handler.write(",".join(header) + "\n")
309 if isinstance(item[-1]["data"], float):
310 rel_change = round(item[-1]["data"], 1)
312 rel_change = item[-1]["data"]
# Rows are routed into top/low files by test type (ndr/pdr) and a
# 10% relative-change threshold.
313 if "ndr_top" in file_name \
314 and "ndr" in item[0]["data"] \
315 and rel_change >= 10.0:
316 _write_line_to_file(file_handler, item)
317 elif "pdr_top" in file_name \
318 and "pdr" in item[0]["data"] \
319 and rel_change >= 10.0:
320 _write_line_to_file(file_handler, item)
321 elif "ndr_low" in file_name \
322 and "ndr" in item[0]["data"] \
323 and rel_change < 10.0:
324 _write_line_to_file(file_handler, item)
325 elif "pdr_low" in file_name \
326 and "pdr" in item[0]["data"] \
327 and rel_change < 10.0:
328 _write_line_to_file(file_handler, item)
330 logging.info("  Done.")
333 def _read_csv_template(file_name):
334 """Read the template from a .csv file.
336 :param file_name: Name / full path / relative path of the file to read.
338 :returns: Data from the template as list (lines) of lists (items on line).
340 :raises: PresentationError if it is not possible to read the file.
344 with open(file_name, 'r') as csv_file:
346 for line in csv_file:
347 tmpl_data.append(line[:-1].split(","))
349 except IOError as err:
350 raise PresentationError(str(err), level="ERROR")
353 def table_performance_comparison(table, input_data):
# NOTE(review): elided listing -- original line numbers jump; many
# statements (tbl_dict/tbl_lst initialisations, try/except lines,
# else branches, returns, list literals' closing brackets) are not
# visible in this excerpt.
354 """Generate the table(s) with algorithm: table_performance_comparison
355 specified in the specification file.
357 :param table: Table to generate.
358 :param input_data: Data to process.
359 :type table: pandas.Series
360 :type input_data: InputData
363 logging.info("  Generating the table {0} ...".
364 format(table.get("title", "")))
367 logging.info("  Creating the data set for the {0} '{1}'.".
368 format(table.get("type", ""), table.get("title", "")))
369 data = input_data.filter_data(table, continue_on_error=True)
371 # Prepare the header of the tables
373 header = ["Test case", ]
# Optional extra "history" columns precede the reference/compare pairs.
375 history = table.get("history", None)
379 ["{0} Throughput [Mpps]".format(item["title"]),
380 "{0} Stdev [Mpps]".format(item["title"])])
382 ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
383 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
384 "{0} Throughput [Mpps]".format(table["compare"]["title"]),
385 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
387 header_str = ",".join(header) + "\n"
388 except (AttributeError, KeyError) as err:
389 logging.error("The model is invalid, missing parameter: {0}".
393 # Prepare data to the table:
# Reference builds: collect throughput samples per test name.
395 for job, builds in table["reference"]["data"].items():
397 for tst_name, tst_data in data[job][str(build)].iteritems():
398 if tbl_dict.get(tst_name, None) is None:
# Display name = first segment of the parent suite + test name
# (trailing NIC/driver parts dropped on an elided line).
399 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
400 "-".join(tst_data["name"].
402 tbl_dict[tst_name] = {"name": name,
406 tbl_dict[tst_name]["ref-data"].\
407 append(tst_data["throughput"]["value"])
409 pass  # No data in output.xml for this test
# Compare builds: append samples for tests already seen in reference.
411 for job, builds in table["compare"]["data"].items():
413 for tst_name, tst_data in data[job][str(build)].iteritems():
415 tbl_dict[tst_name]["cmp-data"].\
416 append(tst_data["throughput"]["value"])
420 tbl_dict.pop(tst_name, None)
# Optional history data sets, keyed by their titles.
423 for job, builds in item["data"].items():
425 for tst_name, tst_data in data[job][str(build)].iteritems():
426 if tbl_dict.get(tst_name, None) is None:
428 if tbl_dict[tst_name].get("history", None) is None:
429 tbl_dict[tst_name]["history"] = OrderedDict()
430 if tbl_dict[tst_name]["history"].get(item["title"],
432 tbl_dict[tst_name]["history"][item["title"]] = \
435 tbl_dict[tst_name]["history"][item["title"]].\
436 append(tst_data["throughput"]["value"])
437 except (TypeError, KeyError):
# Build rows: mean/stdev in Mpps after outlier removal; missing data
# yields None placeholders so columns stay aligned with the header.
441 for tst_name in tbl_dict.keys():
442 item = [tbl_dict[tst_name]["name"], ]
444 if tbl_dict[tst_name].get("history", None) is not None:
445 for hist_data in tbl_dict[tst_name]["history"].values():
447 data_t = remove_outliers(
448 hist_data, outlier_const=table["outlier-const"])
450 item.append(round(mean(data_t) / 1000000, 2))
451 item.append(round(stdev(data_t) / 1000000, 2))
453 item.extend([None, None])
455 item.extend([None, None])
457 item.extend([None, None])
458 if tbl_dict[tst_name]["ref-data"]:
459 data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
460 outlier_const=table["outlier-const"])
461 # TODO: Specify window size.
463 item.append(round(mean(data_t) / 1000000, 2))
464 item.append(round(stdev(data_t) / 1000000, 2))
466 item.extend([None, None])
468 item.extend([None, None])
469 if tbl_dict[tst_name]["cmp-data"]:
470 data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
471 outlier_const=table["outlier-const"])
472 # TODO: Specify window size.
474 item.append(round(mean(data_t) / 1000000, 2))
475 item.append(round(stdev(data_t) / 1000000, 2))
477 item.extend([None, None])
479 item.extend([None, None])
# item[-4] is the reference mean, item[-2] the compare mean.
480 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
481 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
482 if len(item) == len(header):
485 # Sort the table according to the relative change
486 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
# Six "full" CSVs: {ndr,pdr} x {1t1c,2t2c,4t4c}.
490 tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
491 table["output-file-ext"]),
492 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
493 table["output-file-ext"]),
494 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
495 table["output-file-ext"]),
496 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
497 table["output-file-ext"]),
498 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
499 table["output-file-ext"]),
500 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
501 table["output-file-ext"])
503 for file_name in tbl_names:
504 logging.info("      Writing file: '{0}'".format(file_name))
505 with open(file_name, "w") as file_handler:
506 file_handler.write(header_str)
# Route each row by matching the file name's type/cores tokens
# against the test name, then strip the cores suffix from the name.
508 if (file_name.split("-")[-3] in test[0] and  # NDR vs PDR
509 file_name.split("-")[-2] in test[0]):  # cores
510 test[0] = "-".join(test[0].split("-")[:-1])
511 file_handler.write(",".join([str(item) for item in test]) +
# Pretty .txt versions of the six full tables.
515 tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
516 "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
517 "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
518 "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
519 "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
520 "{0}-pdr-4t4c-full.txt".format(table["output-file"])
523 for i, txt_name in enumerate(tbl_names_txt):
524 logging.info("      Writing file: '{0}'".format(txt_name))
525 convert_csv_to_pretty_txt(tbl_names[i], txt_name)
527 # Selected tests in csv:
# Top/bottom selections: first/last nr-of-tests-shown rows of the
# 1t1c full tables (which are sorted by relative change).
528 input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
529 table["output-file-ext"])
530 with open(input_file, "r") as in_file:
535 output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
536 table["output-file-ext"])
537 logging.info("      Writing file: '{0}'".format(output_file))
538 with open(output_file, "w") as out_file:
539 out_file.write(header_str)
540 for i, line in enumerate(lines[1:]):
541 if i == table["nr-of-tests-shown"]:
545 output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
546 table["output-file-ext"])
547 logging.info("      Writing file: '{0}'".format(output_file))
548 with open(output_file, "w") as out_file:
549 out_file.write(header_str)
# Iterate from the last line back to (not including) the header.
550 for i, line in enumerate(lines[-1:0:-1]):
551 if i == table["nr-of-tests-shown"]:
555 input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
556 table["output-file-ext"])
557 with open(input_file, "r") as in_file:
562 output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
563 table["output-file-ext"])
564 logging.info("      Writing file: '{0}'".format(output_file))
565 with open(output_file, "w") as out_file:
566 out_file.write(header_str)
567 for i, line in enumerate(lines[1:]):
568 if i == table["nr-of-tests-shown"]:
572 output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
573 table["output-file-ext"])
574 logging.info("      Writing file: '{0}'".format(output_file))
575 with open(output_file, "w") as out_file:
576 out_file.write(header_str)
577 for i, line in enumerate(lines[-1:0:-1]):
578 if i == table["nr-of-tests-shown"]:
583 def table_performance_comparison_mrr(table, input_data):
# NOTE(review): elided listing -- initialisations, try/except and else
# lines are missing from this view; same overall shape as
# table_performance_comparison but for MRR results and without the
# history / top-bottom selection parts.
584 """Generate the table(s) with algorithm: table_performance_comparison_mrr
585 specified in the specification file.
587 :param table: Table to generate.
588 :param input_data: Data to process.
589 :type table: pandas.Series
590 :type input_data: InputData
593 logging.info("  Generating the table {0} ...".
594 format(table.get("title", "")))
597 logging.info("  Creating the data set for the {0} '{1}'.".
598 format(table.get("type", ""), table.get("title", "")))
599 data = input_data.filter_data(table, continue_on_error=True)
601 # Prepare the header of the tables
603 header = ["Test case",
604 "{0} Throughput [Mpps]".format(table["reference"]["title"]),
605 "{0} stdev [Mpps]".format(table["reference"]["title"]),
606 "{0} Throughput [Mpps]".format(table["compare"]["title"]),
607 "{0} stdev [Mpps]".format(table["compare"]["title"]),
609 header_str = ",".join(header) + "\n"
610 except (AttributeError, KeyError) as err:
611 logging.error("The model is invalid, missing parameter: {0}".
615 # Prepare data to the table:
# MRR samples come from tst_data["result"]["throughput"].
617 for job, builds in table["reference"]["data"].items():
619 for tst_name, tst_data in data[job][str(build)].iteritems():
620 if tbl_dict.get(tst_name, None) is None:
621 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
622 "-".join(tst_data["name"].
624 tbl_dict[tst_name] = {"name": name,
628 tbl_dict[tst_name]["ref-data"].\
629 append(tst_data["result"]["throughput"])
631 pass  # No data in output.xml for this test
633 for job, builds in table["compare"]["data"].items():
635 for tst_name, tst_data in data[job][str(build)].iteritems():
637 tbl_dict[tst_name]["cmp-data"].\
638 append(tst_data["result"]["throughput"])
642 tbl_dict.pop(tst_name, None)
# Rows: name, ref mean/stdev [Mpps], cmp mean/stdev [Mpps], rel change.
645 for tst_name in tbl_dict.keys():
646 item = [tbl_dict[tst_name]["name"], ]
647 if tbl_dict[tst_name]["ref-data"]:
648 data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
649 outlier_const=table["outlier-const"])
650 # TODO: Specify window size.
652 item.append(round(mean(data_t) / 1000000, 2))
653 item.append(round(stdev(data_t) / 1000000, 2))
655 item.extend([None, None])
657 item.extend([None, None])
658 if tbl_dict[tst_name]["cmp-data"]:
659 data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
660 outlier_const=table["outlier-const"])
661 # TODO: Specify window size.
663 item.append(round(mean(data_t) / 1000000, 2))
664 item.append(round(stdev(data_t) / 1000000, 2))
666 item.extend([None, None])
668 item.extend([None, None])
# item[1] = reference mean, item[3] = compare mean.
669 if item[1] is not None and item[3] is not None and item[1] != 0:
670 item.append(int(relative_change(float(item[1]), float(item[3]))))
674 # Sort the table according to the relative change
675 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
# Three per-core-count CSV files plus pretty .txt versions.
679 tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
680 table["output-file-ext"]),
681 "{0}-2t2c-full{1}".format(table["output-file"],
682 table["output-file-ext"]),
683 "{0}-4t4c-full{1}".format(table["output-file"],
684 table["output-file-ext"])
686 for file_name in tbl_names:
687 logging.info("      Writing file: '{0}'".format(file_name))
688 with open(file_name, "w") as file_handler:
689 file_handler.write(header_str)
691 if file_name.split("-")[-2] in test[0]:  # cores
692 test[0] = "-".join(test[0].split("-")[:-1])
693 file_handler.write(",".join([str(item) for item in test]) +
697 tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
698 "{0}-2t2c-full.txt".format(table["output-file"]),
699 "{0}-4t4c-full.txt".format(table["output-file"])
702 for i, txt_name in enumerate(tbl_names_txt):
703 logging.info("      Writing file: '{0}'".format(txt_name))
704 convert_csv_to_pretty_txt(tbl_names[i], txt_name)
707 def table_performance_trending_dashboard(table, input_data):
# NOTE(review): elided listing -- initialisations, try/except lines and
# some header items are missing from this view.
708 """Generate the table(s) with algorithm: table_performance_comparison
709 specified in the specification file.
711 :param table: Table to generate.
712 :param input_data: Data to process.
713 :type table: pandas.Series
714 :type input_data: InputData
717 logging.info("  Generating the table {0} ...".
718 format(table.get("title", "")))
721 logging.info("  Creating the data set for the {0} '{1}'.".
722 format(table.get("type", ""), table.get("title", "")))
723 data = input_data.filter_data(table, continue_on_error=True)
725 # Prepare the header of the tables
726 header = ["Test Case",
728 "Short-Term Change [%]",
729 "Long-Term Change [%]",
734 header_str = ",".join(header) + "\n"
736 # Prepare data to the table:
# Collect per-build throughput samples, skipping ignored tests.
738 for job, builds in table["data"].items():
740 for tst_name, tst_data in data[job][str(build)].iteritems():
741 if tst_name.lower() in table["ignore-list"]:
743 if tbl_dict.get(tst_name, None) is None:
744 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
745 "-".join(tst_data["name"].
747 tbl_dict[tst_name] = {"name": name,
748 "data": OrderedDict()}
750 tbl_dict[tst_name]["data"][str(build)] = \
751 tst_data["result"]["throughput"]
752 except (TypeError, KeyError):
753 pass  # No data in output.xml for this test
# Trend statistics: tests with fewer than 3 samples are skipped.
756 for tst_name in tbl_dict.keys():
757 if len(tbl_dict[tst_name]["data"]) < 3:
760 pd_data = pd.Series(tbl_dict[tst_name]["data"])
761 data_t, _ = split_outliers(pd_data, outlier_const=1.5,
762 window=table["window"])
763 last_key = data_t.keys()[-1]
764 win_size = min(data_t.size, table["window"])
765 win_first_idx = data_t.size - win_size
766 key_14 = data_t.keys()[win_first_idx]
767 long_win_size = min(data_t.size, table["long-trend-window"])
# Rolling median over the short window is the trend line.
768 median_t = data_t.rolling(window=win_size, min_periods=2).median()
769 median_first_idx = median_t.size - long_win_size
772 [x for x in median_t.values[median_first_idx:-win_size]
777 last_median_t = median_t[last_key]
781 median_t_14 = median_t[key_14]
# Short-term change: last median vs median at the window start;
# long-term change: last median vs (elided) max median over the
# long window. NaN / zero denominators are propagated as NaN.
785 if isnan(last_median_t) or isnan(median_t_14) or median_t_14 == 0.0:
786 rel_change_last = nan
788 rel_change_last = round(
789 ((last_median_t - median_t_14) / median_t_14) * 100, 2)
791 if isnan(max_median) or isnan(last_median_t) or max_median == 0.0:
792 rel_change_long = nan
794 rel_change_long = round(
795 ((last_median_t - max_median) / max_median) * 100, 2)
797 # Classification list:
798 classification_lst = classify_anomalies(data_t, window=14)
800 if classification_lst:
801 if isnan(rel_change_last) and isnan(rel_change_long):
# Row: name, last median [Mpps], short/long change, and counts of
# regressions / progressions / outliers inside the short window.
804 [tbl_dict[tst_name]["name"],
805 '-' if isnan(last_median_t) else
806 round(last_median_t / 1000000, 2),
807 '-' if isnan(rel_change_last) else rel_change_last,
808 '-' if isnan(rel_change_long) else rel_change_long,
809 classification_lst[win_first_idx:].count("regression"),
810 classification_lst[win_first_idx:].count("progression"),
811 classification_lst[win_first_idx:].count("outlier")])
813 tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-key ordering: regressions desc, then progressions desc,
# then outliers desc, then short-term change asc within each bucket.
816 for nrr in range(table["window"], -1, -1):
817 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
818 for nrp in range(table["window"], -1, -1):
819 tbl_pro = [item for item in tbl_reg if item[5] == nrp]
820 for nro in range(table["window"], -1, -1):
821 tbl_out = [item for item in tbl_pro if item[6] == nro]
822 tbl_out.sort(key=lambda rel: rel[2])
823 tbl_sorted.extend(tbl_out)
825 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
827 logging.info("    Writing file: '{0}'".format(file_name))
828 with open(file_name, "w") as file_handler:
829 file_handler.write(header_str)
830 for test in tbl_sorted:
831 file_handler.write(",".join([str(item) for item in test]) + '\n')
833 txt_file_name = "{0}.txt".format(table["output-file"])
834 logging.info("    Writing file: '{0}'".format(txt_file_name))
835 convert_csv_to_pretty_txt(file_name, txt_file_name)
838 def _generate_url(base, test_name):
# NOTE(review): elided listing -- the initial assignments (url,
# file_name, anchor, feature) and the anchor-fragment assignments for
# NIC / frame size / core count are missing from this view; only the
# branch conditions survive.
839 """Generate URL to a trending plot from the name of the test case.
841 :param base: The base part of URL common to all test cases.
842 :param test_name: The name of the test case.
845 :returns: The URL to the plot with the trending data for the given test
# Pick the target trending page from keywords in the test name.
855 if "lbdpdk" in test_name or "lbvpp" in test_name:
856 file_name = "link_bonding.html"
858 elif "testpmd" in test_name or "l3fwd" in test_name:
859 file_name = "dpdk.html"
861 elif "memif" in test_name:
862 file_name = "container_memif.html"
864 elif "srv6" in test_name:
865 file_name = "srv6.html"
867 elif "vhost" in test_name:
868 if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
869 file_name = "vm_vhost_l2.html"
870 elif "ip4base" in test_name:
871 file_name = "vm_vhost_ip4.html"
873 elif "ipsec" in test_name:
874 file_name = "ipsec.html"
876 elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
877 file_name = "ip4_tunnels.html"
879 elif "ip4base" in test_name or "ip4scale" in test_name:
880 file_name = "ip4.html"
# Feature variants get a "-features" suffix on the anchor.
881 if "iacl" in test_name or "snat" in test_name or "cop" in test_name:
882 feature = "-features"
884 elif "ip6base" in test_name or "ip6scale" in test_name:
885 file_name = "ip6.html"
887 elif "l2xcbase" in test_name or "l2xcscale" in test_name \
888 or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
889 or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
890 file_name = "l2.html"
891 if "iacl" in test_name:
892 feature = "-features"
# NIC type, frame size and core count each contribute an anchor
# fragment (assignments elided in this excerpt).
894 if "x520" in test_name:
896 elif "x710" in test_name:
898 elif "xl710" in test_name:
901 if "64b" in test_name:
903 elif "78b" in test_name:
905 elif "imix" in test_name:
907 elif "9000b" in test_name:
909 elif "1518" in test_name:
912 if "1t1c" in test_name:
914 elif "2t2c" in test_name:
916 elif "4t4c" in test_name:
# Final URL = base + page + anchor fragments + optional feature suffix.
919 return url + file_name + anchor + feature
922 def table_performance_trending_dashboard_html(table, input_data):
# NOTE(review): elided listing -- try/return lines, the text
# assignments of th/td elements and the regression/outlier colour
# branches are missing from this view.
923 """Generate the table(s) with algorithm:
924 table_performance_trending_dashboard_html specified in the specification
927 :param table: Table to generate.
928 :param input_data: Data to process.
929 :type table: pandas.Series
930 :type input_data: InputData
933 logging.info("  Generating the table {0} ...".
934 format(table.get("title", "")))
# Read the previously generated dashboard CSV ('rb' is a Python 2 ism;
# Python 3 csv.reader needs a text-mode file).
937 with open(table["input-file"], 'rb') as csv_file:
938 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
939 csv_lst = [item for item in csv_content]
941 logging.warning("The input file is not defined.")
943 except csv.Error as err:
944 logging.warning("Not possible to process the file '{0}'.\n{1}".
945 format(table["input-file"], err))
# Build the HTML table with ElementTree.
949 dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
952 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
953 for idx, item in enumerate(csv_lst[0]):
954 alignment = "left" if idx == 0 else "center"
955 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Row background alternates within each anomaly class.
959 colors = {"regression": ("#ffcccc", "#ff9999"),
960 "progression": ("#c6ecc6", "#9fdf9f"),
961 "outlier": ("#e6e6e6", "#cccccc"),
962 "normal": ("#e9f1fb", "#d4e4f7")}
963 for r_idx, row in enumerate(csv_lst[1:]):
967 color = "progression"
972 background = colors[color][r_idx % 2]
973 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
976 for c_idx, item in enumerate(row):
977 alignment = "left" if c_idx == 0 else "center"
978 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# First column becomes a link to the matching trending plot.
981 url = _generate_url("../trending/", item)
982 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Emit the table as an rST raw-html block.
987 with open(table["output-file"], 'w') as html_file:
988 logging.info("    Writing file: '{0}'".format(table["output-file"]))
989 html_file.write(".. raw:: html\n\n\t")
990 html_file.write(ET.tostring(dashboard))
991 html_file.write("\n\t<p><br><br></p>\n")
993 logging.warning("The output file is not defined.")
997 def table_failed_tests(table, input_data):
# NOTE(review): elided listing -- initialisations, try lines, the
# fails counter and some row fields are missing from this view.
998 """Generate the table(s) with algorithm: table_failed_tests
999 specified in the specification file.
1001 :param table: Table to generate.
1002 :param input_data: Data to process.
1003 :type table: pandas.Series
1004 :type input_data: InputData
1007 logging.info("  Generating the table {0} ...".
1008 format(table.get("title", "")))
1010 # Transform the data
1011 logging.info("  Creating the data set for the {0} '{1}'.".
1012 format(table.get("type", ""), table.get("title", "")))
1013 data = input_data.filter_data(table, continue_on_error=True)
1015 # Prepare the header of the tables
1016 header = ["Test Case",
1018 "Last Fail [Timestamp]",
1019 "Last Fail [VPP Build]",
1020 "Last Fail [CSIT Build]"]
1022 # Generate the data for the table according to the model in the table
# Collect per-build (status, timestamp, vpp version, csit build)
# tuples for every non-ignored test.
1025 for job, builds in table["data"].items():
1026 for build in builds:
1028 for tst_name, tst_data in data[job][build].iteritems():
1029 if tst_name.lower() in table["ignore-list"]:
1031 if tbl_dict.get(tst_name, None) is None:
1032 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
1033 "-".join(tst_data["name"].
1035 tbl_dict[tst_name] = {"name": name,
1036 "data": OrderedDict()}
1038 tbl_dict[tst_name]["data"][build] = (
1040 input_data.metadata(job, build).get("generated", ""),
1041 input_data.metadata(job, build).get("version", ""),
1043 except (TypeError, KeyError):
1044 pass  # No data in output.xml for this test
# Count failures inside the last 'window' builds and remember the
# metadata of the most recent failure.
1047 for tst_data in tbl_dict.values():
1048 win_size = min(len(tst_data["data"]), table["window"])
1050 for val in tst_data["data"].values()[-win_size:]:
1051 if val[0] == "FAIL":
1053 fails_last_date = val[1]
1054 fails_last_vpp = val[2]
1055 fails_last_csit = val[3]
1057 tbl_lst.append([tst_data["name"],
1061 "mrr-daily-build-{0}".format(fails_last_csit)])
# Sort by last-fail timestamp desc, then bucket by failure count desc.
1063 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1065 for nrf in range(table["window"], -1, -1):
1066 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1067 tbl_sorted.extend(tbl_fails)
1068 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1070 logging.info("    Writing file: '{0}'".format(file_name))
1071 with open(file_name, "w") as file_handler:
1072 file_handler.write(",".join(header) + "\n")
1073 for test in tbl_sorted:
1074 file_handler.write(",".join([str(item) for item in test]) + '\n')
1076 txt_file_name = "{0}.txt".format(table["output-file"])
1077 logging.info("    Writing file: '{0}'".format(txt_file_name))
1078 convert_csv_to_pretty_txt(file_name, txt_file_name)
1081 def table_failed_tests_html(table, input_data):
# NOTE(review): elided listing -- try/return lines, th/td text
# assignments and the colour-selection branches are missing from this
# view. Mirrors table_performance_trending_dashboard_html.
1082 """Generate the table(s) with algorithm: table_failed_tests_html
1083 specified in the specification file.
1085 :param table: Table to generate.
1086 :param input_data: Data to process.
1087 :type table: pandas.Series
1088 :type input_data: InputData
1091 logging.info("  Generating the table {0} ...".
1092 format(table.get("title", "")))
# Read the failed-tests CSV ('rb' is a Python 2 ism; Python 3
# csv.reader needs text mode).
1095 with open(table["input-file"], 'rb') as csv_file:
1096 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1097 csv_lst = [item for item in csv_content]
1099 logging.warning("The input file is not defined.")
1101 except csv.Error as err:
1102 logging.warning("Not possible to process the file '{0}'.\n{1}".
1103 format(table["input-file"], err))
1107 failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
1110 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
1111 for idx, item in enumerate(csv_lst[0]):
1112 alignment = "left" if idx == 0 else "center"
1113 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Alternating row backgrounds per severity class.
1117 colors = {"very-bad": ("#ffcccc", "#ff9999"),
1118 "bad": ("#e9f1fb", "#d4e4f7")}
1119 for r_idx, row in enumerate(csv_lst[1:]):
1124 background = colors[color][r_idx % 2]
1125 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
1128 for c_idx, item in enumerate(row):
1129 alignment = "left" if c_idx == 0 else "center"
1130 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# First column links to the matching trending plot.
1133 url = _generate_url("../trending/", item)
1134 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Emit the table as an rST raw-html block.
1139 with open(table["output-file"], 'w') as html_file:
1140 logging.info("    Writing file: '{0}'".format(table["output-file"]))
1141 html_file.write(".. raw:: html\n\n\t")
1142 html_file.write(ET.tostring(failed_tests))
1143 html_file.write("\n\t<p><br><br></p>\n")
1145 logging.warning("The output file is not defined.")