1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
21 from string import replace
22 from collections import OrderedDict
23 from numpy import nan, isnan
24 from xml.etree import ElementTree as ET
26 from errors import PresentationError
27 from utils import mean, stdev, relative_change, classify_anomalies, \
28 convert_csv_to_pretty_txt
# Dispatcher: for each table entry in the specification, resolve the entry's
# "algorithm" string to a function in this module and call it as f(table, data).
# An unknown algorithm name surfaces as NameError and is logged, not raised, so
# one bad spec entry does not abort the remaining tables.
# NOTE(review): this listing has gaps (e.g. the "try:" matching the visible
# "except NameError" is not shown); comments describe only the visible lines.
31 def generate_tables(spec, data):
32 """Generate all tables specified in the specification file.
34 :param spec: Specification read from the specification file.
35 :param data: Data to process.
36 :type spec: Specification
40 logging.info("Generating the tables ...")
41 for table in spec.tables:
# HACK: eval() of a spec-provided name — safe only because the specification
# file is trusted, operator-authored input; a dict of allowed algorithm
# functions would be the safer dispatch mechanism.
43 eval(table["algorithm"])(table, data)
44 except NameError as err:
45 logging.error("Probably algorithm '{alg}' is not defined: {err}".
46 format(alg=table["algorithm"], err=repr(err)))
# Builds one detailed-results CSV per suite: filters the input data down to a
# single (job, build), walks each suite's tests, extracts the columns named in
# the table model, and writes a quoted CSV file per suite.
# NOTE(review): Python-2-only idioms are used here — dict.keys()[0] (keys() is
# a list only in Py2) and dict.iteritems(); both break under Python 3.
50 def table_details(table, input_data):
51 """Generate the table(s) with algorithm: table_detailed_test_results
52 specified in the specification file.
54 :param table: Table to generate.
55 :param input_data: Data to process.
56 :type table: pandas.Series
57 :type input_data: InputData
60 logging.info(" Generating the table {0} ...".
61 format(table.get("title", "")))
64 logging.info(" Creating the data set for the {0} '{1}'.".
65 format(table.get("type", ""), table.get("title", "")))
66 data = input_data.filter_data(table)
68 # Prepare the header of the tables
# Column titles are CSV-quoted; embedded double quotes are doubled per RFC 4180.
70 for column in table["columns"]:
71 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
73 # Generate the data for the table according to the model in the table
# Only the first job and its first build are used — presumably the spec
# guarantees exactly one of each for this algorithm; TODO confirm.
75 job = table["data"].keys()[0]
76 build = str(table["data"][job][0])
78 suites = input_data.suites(job, build)
80 logging.error(" No data available. The table will not be generated.")
83 for suite_longname, suite in suites.iteritems():
85 suite_name = suite["name"]
87 for test in data[job][build].keys():
# Membership test: a test belongs to this suite when its "parent" string is a
# substring of the suite name.
88 if data[job][build][test]["parent"] in suite_name:
90 for column in table["columns"]:
# column["data"] looks like "<cmd> <field>"; the field part indexes the test
# record.  Embedded quotes are doubled for CSV safety.
92 col_data = str(data[job][build][test][column["data"].
93 split(" ")[1]]).replace('"', '""')
94 if column["data"].split(" ")[1] in ("vat-history",
# NOTE(review): "replace" here is string.replace (Py2-only module function),
# imported at file top — removed in Python 3 in favour of str.replace.
96 col_data = replace(col_data, " |br| ", "",
98 col_data = " |prein| {0} |preout| ".\
100 row_lst.append('"{0}"'.format(col_data))
102 row_lst.append("No data")
103 table_lst.append(row_lst)
105 # Write the data to file
# One output file per suite: <output-file>_<suite><ext>.
107 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
108 table["output-file-ext"])
109 logging.info(" Writing file: '{}'".format(file_name))
110 with open(file_name, "w") as file_handler:
111 file_handler.write(",".join(header) + "\n")
112 for item in table_lst:
113 file_handler.write(",".join(item) + "\n")
115 logging.info(" Done.")
# Same shape as table_details above, but operates on data merged across
# jobs/builds (input_data.merge_data) instead of a single (job, build), so the
# per-test lookups drop the job/build indexing.  Writes one CSV per suite.
118 def table_merged_details(table, input_data):
119 """Generate the table(s) with algorithm: table_merged_details
120 specified in the specification file.
122 :param table: Table to generate.
123 :param input_data: Data to process.
124 :type table: pandas.Series
125 :type input_data: InputData
128 logging.info(" Generating the table {0} ...".
129 format(table.get("title", "")))
132 logging.info(" Creating the data set for the {0} '{1}'.".
133 format(table.get("type", ""), table.get("title", "")))
134 data = input_data.filter_data(table)
135 data = input_data.merge_data(data)
136 data.sort_index(inplace=True)
138 logging.info(" Creating the data set for the {0} '{1}'.".
139 format(table.get("type", ""), table.get("title", "")))
140 suites = input_data.filter_data(table, data_set="suites")
141 suites = input_data.merge_data(suites)
143 # Prepare the header of the tables
145 for column in table["columns"]:
146 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
# NOTE(review): iteritems() is Python-2-only; breaks under Python 3.
148 for _, suite in suites.iteritems():
150 suite_name = suite["name"]
152 for test in data.keys():
153 if data[test]["parent"] in suite_name:
155 for column in table["columns"]:
# column["data"] is "<cmd> <field>"; the field part indexes the merged record.
157 col_data = str(data[test][column["data"].
158 split(" ")[1]]).replace('"', '""')
159 if column["data"].split(" ")[1] in ("vat-history",
# "replace" is the Py2-only string-module function imported at file top.
161 col_data = replace(col_data, " |br| ", "",
# [:-5] — presumably strips a trailing " |br| " fragment; TODO confirm
# against the missing lines of this listing.
163 col_data = " |prein| {0} |preout| ".\
164 format(col_data[:-5])
165 row_lst.append('"{0}"'.format(col_data))
167 row_lst.append("No data")
168 table_lst.append(row_lst)
170 # Write the data to file
172 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
173 table["output-file-ext"])
174 logging.info(" Writing file: '{}'".format(file_name))
175 with open(file_name, "w") as file_handler:
176 file_handler.write(",".join(header) + "\n")
177 for item in table_lst:
178 file_handler.write(",".join(item) + "\n")
180 logging.info(" Done.")
# Builds improvement tables from a CSV template plus measured throughput data,
# then splits rows into four files (ndr/pdr x top/low) on a 10% relative-change
# threshold.  The docstring's own FIXME says this algorithm is currently unused.
183 def table_performance_improvements(table, input_data):
184 """Generate the table(s) with algorithm: table_performance_improvements
185 specified in the specification file.
187 # FIXME: Not used now.
189 :param table: Table to generate.
190 :param input_data: Data to process.
191 :type table: pandas.Series
192 :type input_data: InputData
# Helper: serialize one table row (list of {"data": ...} cells) as a CSV line.
195 def _write_line_to_file(file_handler, data):
196 """Write a line to the .csv file.
198 :param file_handler: File handler for the csv file. It must be open for
200 :param data: Item to be written to the file.
201 :type file_handler: BinaryIO
207 if isinstance(item["data"], str):
208 # Remove -?drdisc from the end
# [:-8] drops 8 chars, i.e. the matched "drdisc" plus a 2-char prefix
# (e.g. "-ndrdisc"/"-pdrdisc"); the endswith check only requires "drdisc".
209 if item["data"].endswith("drdisc"):
210 item["data"] = item["data"][:-8]
211 line_lst.append(item["data"])
212 elif isinstance(item["data"], float):
213 line_lst.append("{:.1f}".format(item["data"]))
214 elif item["data"] is None:
216 file_handler.write(",".join(line_lst) + "\n")
218 logging.info(" Generating the table {0} ...".
219 format(table.get("title", "")))
# Read the CSV template; missing/unreadable template aborts this table only.
222 file_name = table.get("template", None)
225 tmpl = _read_csv_template(file_name)
226 except PresentationError:
227 logging.error(" The template '{0}' does not exist. Skipping the "
228 "table.".format(file_name))
231 logging.error("The template is not defined. Skipping the table.")
235 logging.info(" Creating the data set for the {0} '{1}'.".
236 format(table.get("type", ""), table.get("title", "")))
237 data = input_data.filter_data(table)
239 # Prepare the header of the tables
241 for column in table["columns"]:
242 header.append(column["title"])
244 # Generate the data for the table according to the model in the table
# Each column["data"] is "<cmd> <args...>"; cmd selects how the cell is filled:
#   "template"  — copy a value from the template row,
#   (data cmd)  — aggregate measured throughput values,
#   "operation" — combine two earlier cells arithmetically.
247 for tmpl_item in tmpl:
249 for column in table["columns"]:
250 cmd = column["data"].split(" ")[0]
251 args = column["data"].split(" ")[1:]
252 if cmd == "template":
254 val = float(tmpl_item[int(args[0])])
256 val = tmpl_item[int(args[0])]
257 tbl_item.append({"data": val})
263 for build in data[job]:
265 data_lst.append(float(build[tmpl_item[0]]
266 ["throughput"]["value"]))
267 except (KeyError, TypeError):
# HACK: eval() of an operation name from the spec (trusted input); a mapping of
# allowed callables would be safer.
271 tbl_item.append({"data": (eval(operation)(data_lst)) /
274 tbl_item.append({"data": None})
275 elif cmd == "operation":
278 nr1 = float(tbl_item[int(args[1])]["data"])
279 nr2 = float(tbl_item[int(args[2])]["data"])
281 tbl_item.append({"data": eval(operation)(nr1, nr2)})
283 tbl_item.append({"data": None})
284 except (IndexError, ValueError, TypeError):
285 logging.error("No data for {0}".format(tbl_item[0]["data"]))
286 tbl_item.append({"data": None})
289 logging.error("Not supported command {0}. Skipping the table.".
292 tbl_lst.append(tbl_item)
294 # Sort the table according to the relative change
# Last cell of each row holds the relative change; sort descending on it.
295 tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)
297 # Create the tables and write them to the files
299 "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
300 "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
301 "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
302 "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])
305 for file_name in file_names:
306 logging.info(" Writing the file '{0}'".format(file_name))
307 with open(file_name, "w") as file_handler:
308 file_handler.write(",".join(header) + "\n")
310 if isinstance(item[-1]["data"], float):
311 rel_change = round(item[-1]["data"], 1)
313 rel_change = item[-1]["data"]
# Route each row to exactly one of the four files by test type (ndr/pdr in the
# row's name) and by whether the change clears the 10.0 threshold.
314 if "ndr_top" in file_name \
315 and "ndr" in item[0]["data"] \
316 and rel_change >= 10.0:
317 _write_line_to_file(file_handler, item)
318 elif "pdr_top" in file_name \
319 and "pdr" in item[0]["data"] \
320 and rel_change >= 10.0:
321 _write_line_to_file(file_handler, item)
322 elif "ndr_low" in file_name \
323 and "ndr" in item[0]["data"] \
324 and rel_change < 10.0:
325 _write_line_to_file(file_handler, item)
326 elif "pdr_low" in file_name \
327 and "pdr" in item[0]["data"] \
328 and rel_change < 10.0:
329 _write_line_to_file(file_handler, item)
331 logging.info(" Done.")
# Reads a CSV template file into a list of rows (each row a list of strings).
# Parsing is manual: strips the trailing newline via line[:-1] and splits on
# commas — so quoted fields containing commas are NOT supported here.
334 def _read_csv_template(file_name):
335 """Read the template from a .csv file.
337 :param file_name: Name / full path / relative path of the file to read.
339 :returns: Data from the template as list (lines) of lists (items on line).
341 :raises: PresentationError if it is not possible to read the file.
345 with open(file_name, 'r') as csv_file:
347 for line in csv_file:
348 tmpl_data.append(line[:-1].split(","))
# Any I/O failure is re-raised as the project's PresentationError.
350 except IOError as err:
351 raise PresentationError(str(err), level="ERROR")
# Compares NDR/PDR throughput between a "reference" and a "compare" data set
# (optionally with extra "history" columns), computes mean/stdev in Mpps and
# the relative change, then writes six full CSVs (ndr/pdr x 1t1c/2t2c/4t4c),
# pretty-printed .txt versions, and top/bottom 1t1c selections.
# NOTE(review): iteritems() is Python-2-only throughout; breaks on Python 3.
354 def table_performance_comparison(table, input_data):
355 """Generate the table(s) with algorithm: table_performance_comparison
356 specified in the specification file.
358 :param table: Table to generate.
359 :param input_data: Data to process.
360 :type table: pandas.Series
361 :type input_data: InputData
364 logging.info(" Generating the table {0} ...".
365 format(table.get("title", "")))
368 logging.info(" Creating the data set for the {0} '{1}'.".
369 format(table.get("type", ""), table.get("title", "")))
370 data = input_data.filter_data(table, continue_on_error=True)
372 # Prepare the header of the tables
374 header = ["Test case", ]
# Optional history columns come first, then reference/compare pairs.
376 history = table.get("history", None)
380 ["{0} Throughput [Mpps]".format(item["title"]),
381 "{0} Stdev [Mpps]".format(item["title"])])
383 ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
384 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
385 "{0} Throughput [Mpps]".format(table["compare"]["title"]),
386 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
388 header_str = ",".join(header) + "\n"
389 except (AttributeError, KeyError) as err:
390 logging.error("The model is invalid, missing parameter: {0}".
394 # Prepare data to the table:
# Pass 1: collect reference throughput samples keyed by test name.
396 for job, builds in table["reference"]["data"].items():
398 for tst_name, tst_data in data[job][str(build)].iteritems():
399 if tbl_dict.get(tst_name, None) is None:
# Display name: first dash-token of the parent plus the test name tail.
400 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
401 "-".join(tst_data["name"].
403 tbl_dict[tst_name] = {"name": name,
407 tbl_dict[tst_name]["ref-data"].\
408 append(tst_data["throughput"]["value"])
410 pass # No data in output.xml for this test
# Pass 2: collect compare-side samples; tests absent here are dropped.
412 for job, builds in table["compare"]["data"].items():
414 for tst_name, tst_data in data[job][str(build)].iteritems():
416 tbl_dict[tst_name]["cmp-data"].\
417 append(tst_data["throughput"]["value"])
421 tbl_dict.pop(tst_name, None)
# Pass 3 (optional): collect per-title history sample lists.
424 for job, builds in item["data"].items():
426 for tst_name, tst_data in data[job][str(build)].iteritems():
427 if tbl_dict.get(tst_name, None) is None:
429 if tbl_dict[tst_name].get("history", None) is None:
430 tbl_dict[tst_name]["history"] = OrderedDict()
431 if tbl_dict[tst_name]["history"].get(item["title"],
433 tbl_dict[tst_name]["history"][item["title"]] = \
436 tbl_dict[tst_name]["history"][item["title"]].\
437 append(tst_data["throughput"]["value"])
438 except (TypeError, KeyError):
# Reduce samples to rows: mean and stdev converted to Mpps (/ 1e6), 2 decimals;
# missing data becomes a (None, None) pair to keep column positions stable.
442 for tst_name in tbl_dict.keys():
443 item = [tbl_dict[tst_name]["name"], ]
445 if tbl_dict[tst_name].get("history", None) is not None:
446 for hist_data in tbl_dict[tst_name]["history"].values():
448 item.append(round(mean(hist_data) / 1000000, 2))
449 item.append(round(stdev(hist_data) / 1000000, 2))
451 item.extend([None, None])
453 item.extend([None, None])
454 data_t = tbl_dict[tst_name]["ref-data"]
456 item.append(round(mean(data_t) / 1000000, 2))
457 item.append(round(stdev(data_t) / 1000000, 2))
459 item.extend([None, None])
460 data_t = tbl_dict[tst_name]["cmp-data"]
462 item.append(round(mean(data_t) / 1000000, 2))
463 item.append(round(stdev(data_t) / 1000000, 2))
465 item.extend([None, None])
# item[-4] is the reference mean, item[-2] the compare mean; relative change is
# appended only when both exist and the reference is non-zero.
466 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
467 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
468 if len(item) == len(header):
471 # Sort the table according to the relative change
472 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
476 tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
477 table["output-file-ext"]),
478 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
479 table["output-file-ext"]),
480 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
481 table["output-file-ext"]),
482 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
483 table["output-file-ext"]),
484 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
485 table["output-file-ext"]),
486 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
487 table["output-file-ext"])
489 for file_name in tbl_names:
490 logging.info(" Writing file: '{0}'".format(file_name))
491 with open(file_name, "w") as file_handler:
492 file_handler.write(header_str)
# Rows are routed to files by matching the file-name tokens (ndr/pdr, cores)
# against the test name; the trailing token is then stripped from the name.
494 if (file_name.split("-")[-3] in test[0] and # NDR vs PDR
495 file_name.split("-")[-2] in test[0]): # cores
496 test[0] = "-".join(test[0].split("-")[:-1])
497 file_handler.write(",".join([str(item) for item in test]) +
501 tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
502 "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
503 "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
504 "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
505 "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
506 "{0}-pdr-4t4c-full.txt".format(table["output-file"])
509 for i, txt_name in enumerate(tbl_names_txt):
510 logging.info(" Writing file: '{0}'".format(txt_name))
511 convert_csv_to_pretty_txt(tbl_names[i], txt_name)
513 # Selected tests in csv:
# Top/bottom selections: re-read the 1t1c full CSVs and keep the first/last
# "nr-of-tests-shown" data rows (lines[-1:0:-1] iterates bottom-up, excluding
# the header line).
514 input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
515 table["output-file-ext"])
516 with open(input_file, "r") as in_file:
521 output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
522 table["output-file-ext"])
523 logging.info(" Writing file: '{0}'".format(output_file))
524 with open(output_file, "w") as out_file:
525 out_file.write(header_str)
526 for i, line in enumerate(lines[1:]):
527 if i == table["nr-of-tests-shown"]:
531 output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
532 table["output-file-ext"])
533 logging.info(" Writing file: '{0}'".format(output_file))
534 with open(output_file, "w") as out_file:
535 out_file.write(header_str)
536 for i, line in enumerate(lines[-1:0:-1]):
537 if i == table["nr-of-tests-shown"]:
541 input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
542 table["output-file-ext"])
543 with open(input_file, "r") as in_file:
548 output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
549 table["output-file-ext"])
550 logging.info(" Writing file: '{0}'".format(output_file))
551 with open(output_file, "w") as out_file:
552 out_file.write(header_str)
553 for i, line in enumerate(lines[1:]):
554 if i == table["nr-of-tests-shown"]:
558 output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
559 table["output-file-ext"])
560 logging.info(" Writing file: '{0}'".format(output_file))
561 with open(output_file, "w") as out_file:
562 out_file.write(header_str)
563 for i, line in enumerate(lines[-1:0:-1]):
564 if i == table["nr-of-tests-shown"]:
# MRR variant of table_performance_comparison: compares receive-rate averages
# (tst_data["result"]["receive-rate"].avg) between reference and compare sets,
# with no NDR/PDR or history handling, and writes three CSVs (1t1c/2t2c/4t4c)
# plus pretty .txt versions.
# NOTE(review): iteritems() is Python-2-only; breaks on Python 3.
569 def table_performance_comparison_mrr(table, input_data):
570 """Generate the table(s) with algorithm: table_performance_comparison_mrr
571 specified in the specification file.
573 :param table: Table to generate.
574 :param input_data: Data to process.
575 :type table: pandas.Series
576 :type input_data: InputData
579 logging.info(" Generating the table {0} ...".
580 format(table.get("title", "")))
583 logging.info(" Creating the data set for the {0} '{1}'.".
584 format(table.get("type", ""), table.get("title", "")))
585 data = input_data.filter_data(table, continue_on_error=True)
587 # Prepare the header of the tables
589 header = ["Test case",
590 "{0} Throughput [Mpps]".format(table["reference"]["title"]),
591 "{0} stdev [Mpps]".format(table["reference"]["title"]),
592 "{0} Throughput [Mpps]".format(table["compare"]["title"]),
593 "{0} stdev [Mpps]".format(table["compare"]["title"]),
595 header_str = ",".join(header) + "\n"
596 except (AttributeError, KeyError) as err:
597 logging.error("The model is invalid, missing parameter: {0}".
601 # Prepare data to the table:
# Pass 1: reference-side receive-rate samples per test.
603 for job, builds in table["reference"]["data"].items():
605 for tst_name, tst_data in data[job][str(build)].iteritems():
606 if tbl_dict.get(tst_name, None) is None:
607 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
608 "-".join(tst_data["name"].
610 tbl_dict[tst_name] = {"name": name,
614 tbl_dict[tst_name]["ref-data"].\
615 append(tst_data["result"]["receive-rate"].avg)
617 pass # No data in output.xml for this test
# Pass 2: compare-side samples; tests without compare data are dropped.
619 for job, builds in table["compare"]["data"].items():
621 for tst_name, tst_data in data[job][str(build)].iteritems():
623 tbl_dict[tst_name]["cmp-data"].\
624 append(tst_data["result"]["receive-rate"].avg)
628 tbl_dict.pop(tst_name, None)
# Reduce to rows: mean/stdev in Mpps (/ 1e6), None pairs keep positions stable.
631 for tst_name in tbl_dict.keys():
632 item = [tbl_dict[tst_name]["name"], ]
633 data_t = tbl_dict[tst_name]["ref-data"]
635 item.append(round(mean(data_t) / 1000000, 2))
636 item.append(round(stdev(data_t) / 1000000, 2))
638 item.extend([None, None])
639 data_t = tbl_dict[tst_name]["cmp-data"]
641 item.append(round(mean(data_t) / 1000000, 2))
642 item.append(round(stdev(data_t) / 1000000, 2))
644 item.extend([None, None])
# item[1] = reference mean, item[3] = compare mean.
645 if item[1] is not None and item[3] is not None and item[1] != 0:
646 item.append(int(relative_change(float(item[1]), float(item[3]))))
650 # Sort the table according to the relative change
651 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
655 tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
656 table["output-file-ext"]),
657 "{0}-2t2c-full{1}".format(table["output-file"],
658 table["output-file-ext"]),
659 "{0}-4t4c-full{1}".format(table["output-file"],
660 table["output-file-ext"])
662 for file_name in tbl_names:
663 logging.info(" Writing file: '{0}'".format(file_name))
664 with open(file_name, "w") as file_handler:
665 file_handler.write(header_str)
# Route rows to the per-core-count file by matching the core token.
667 if file_name.split("-")[-2] in test[0]: # cores
668 test[0] = "-".join(test[0].split("-")[:-1])
669 file_handler.write(",".join([str(item) for item in test]) +
673 tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
674 "{0}-2t2c-full.txt".format(table["output-file"]),
675 "{0}-4t4c-full.txt".format(table["output-file"])
678 for i, txt_name in enumerate(tbl_names_txt):
679 logging.info(" Writing file: '{0}'".format(txt_name))
680 convert_csv_to_pretty_txt(tbl_names[i], txt_name)
# Builds the trending dashboard: per test, classifies recent results into
# regression/progression/normal (classify_anomalies), computes short- and
# long-term relative change of the trend averages, and writes a CSV plus a
# pretty .txt, sorted by regression count, then progression count, then change.
683 def table_performance_trending_dashboard(table, input_data):
684 """Generate the table(s) with algorithm:
685 table_performance_trending_dashboard
686 specified in the specification file.
688 :param table: Table to generate.
689 :param input_data: Data to process.
690 :type table: pandas.Series
691 :type input_data: InputData
694 logging.info(" Generating the table {0} ...".
695 format(table.get("title", "")))
698 logging.info(" Creating the data set for the {0} '{1}'.".
699 format(table.get("type", ""), table.get("title", "")))
700 data = input_data.filter_data(table, continue_on_error=True)
702 # Prepare the header of the tables
703 header = ["Test Case",
705 "Short-Term Change [%]",
706 "Long-Term Change [%]",
710 header_str = ",".join(header) + "\n"
712 # Prepare data to the table:
# Collect per-build receive-rate results per test, skipping ignored tests.
714 for job, builds in table["data"].items():
716 for tst_name, tst_data in data[job][str(build)].iteritems():
717 if tst_name.lower() in table["ignore-list"]:
719 if tbl_dict.get(tst_name, None) is None:
720 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
722 tbl_dict[tst_name] = {"name": name,
723 "data": OrderedDict()}
725 tbl_dict[tst_name]["data"][str(build)] = \
726 tst_data["result"]["receive-rate"]
727 except (TypeError, KeyError):
728 pass # No data in output.xml for this test
731 for tst_name in tbl_dict.keys():
732 data_t = tbl_dict[tst_name]["data"]
# classify_anomalies labels each sample and returns trend averages.
736 classification_lst, avgs = classify_anomalies(data_t)
# Short window ("window") vs long window ("long-trend-window"), both clamped
# to the number of available samples.
738 win_size = min(len(data_t), table["window"])
739 long_win_size = min(len(data_t), table["long-trend-window"])
743 [x for x in avgs[-long_win_size:-win_size]
748 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Guard divisions: NaN or zero baselines produce a NaN relative change.
750 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
751 rel_change_last = nan
753 rel_change_last = round(
754 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
756 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
757 rel_change_long = nan
759 rel_change_long = round(
760 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
762 if classification_lst:
763 if isnan(rel_change_last) and isnan(rel_change_long):
# Row: name, last trend [Mpps], short/long change, regression and progression
# counts within the short window; NaNs rendered as '-'.
766 [tbl_dict[tst_name]["name"],
767 '-' if isnan(last_avg) else
768 round(last_avg / 1000000, 2),
769 '-' if isnan(rel_change_last) else rel_change_last,
770 '-' if isnan(rel_change_long) else rel_change_long,
771 classification_lst[-win_size:].count("regression"),
772 classification_lst[-win_size:].count("progression")])
774 tbl_lst.sort(key=lambda rel: rel[0])
# Stable bucket sort: descending regressions (item[4]), then descending
# progressions (item[5]), then ascending short-term change (item[2]).
777 for nrr in range(table["window"], -1, -1):
778 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
779 for nrp in range(table["window"], -1, -1):
780 tbl_out = [item for item in tbl_reg if item[5] == nrp]
781 tbl_out.sort(key=lambda rel: rel[2])
782 tbl_sorted.extend(tbl_out)
784 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
786 logging.info(" Writing file: '{0}'".format(file_name))
787 with open(file_name, "w") as file_handler:
788 file_handler.write(header_str)
789 for test in tbl_sorted:
790 file_handler.write(",".join([str(item) for item in test]) + '\n')
792 txt_file_name = "{0}.txt".format(table["output-file"])
793 logging.info(" Writing file: '{0}'".format(txt_file_name))
794 convert_csv_to_pretty_txt(file_name, txt_file_name)
# Maps a test-case name to its trending-plot URL: substring checks on the test
# name choose the target HTML page (file_name), an anchor (NIC / frame size /
# core count — anchor-setting lines are missing from this listing), and an
# optional "-features" suffix.  Result is base + file_name + anchor + feature.
797 def _generate_url(base, test_name):
798 """Generate URL to a trending plot from the name of the test case.
800 :param base: The base part of URL common to all test cases.
801 :param test_name: The name of the test case.
804 :returns: The URL to the plot with the trending data for the given test
# First-match-wins chain selecting the target page by test-name substrings.
814 if "lbdpdk" in test_name or "lbvpp" in test_name:
815 file_name = "link_bonding.html"
817 elif "testpmd" in test_name or "l3fwd" in test_name:
818 file_name = "dpdk.html"
820 elif "memif" in test_name:
821 file_name = "container_memif.html"
823 elif "srv6" in test_name:
824 file_name = "srv6.html"
826 elif "vhost" in test_name:
827 if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
828 file_name = "vm_vhost_l2.html"
829 elif "ip4base" in test_name:
830 file_name = "vm_vhost_ip4.html"
832 elif "ipsec" in test_name:
833 file_name = "ipsec.html"
835 elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
836 file_name = "ip4_tunnels.html"
838 elif "ip4base" in test_name or "ip4scale" in test_name:
839 file_name = "ip4.html"
840 if "iacl" in test_name or "snat" in test_name or "cop" in test_name:
841 feature = "-features"
843 elif "ip6base" in test_name or "ip6scale" in test_name:
844 file_name = "ip6.html"
846 elif "l2xcbase" in test_name or "l2xcscale" in test_name \
847 or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
848 or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
849 file_name = "l2.html"
850 if "iacl" in test_name:
851 feature = "-features"
# NIC model selects part of the anchor (assignments not visible here).
853 if "x520" in test_name:
855 elif "x710" in test_name:
857 elif "xl710" in test_name:
# Frame size extends the anchor (assignments not visible here).
860 if "64b" in test_name:
862 elif "78b" in test_name:
864 elif "imix" in test_name:
866 elif "9000b" in test_name:
868 elif "1518" in test_name:
# Core count completes the anchor (assignments not visible here).
871 if "1t1c" in test_name:
873 elif "2t2c" in test_name:
875 elif "4t4c" in test_name:
878 return url + file_name + anchor + feature
# Renders the dashboard CSV (produced by table_performance_trending_dashboard)
# as an HTML table via xml.etree.ElementTree, color-coding rows by anomaly
# class, linking test names to their trending plots, and embedding the result
# in an RST ".. raw:: html" block.
# NOTE(review): the CSV is opened in 'rb' and fed to csv.reader — valid in
# Python 2 only; Python 3's csv module requires text mode.
881 def table_performance_trending_dashboard_html(table, input_data):
882 """Generate the table(s) with algorithm:
883 table_performance_trending_dashboard_html specified in the specification
886 :param table: Table to generate.
887 :param input_data: Data to process.
888 :type table: pandas.Series
889 :type input_data: InputData
892 logging.info(" Generating the table {0} ...".
893 format(table.get("title", "")))
896 with open(table["input-file"], 'rb') as csv_file:
897 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
898 csv_lst = [item for item in csv_content]
900 logging.warning("The input file is not defined.")
902 except csv.Error as err:
903 logging.warning("Not possible to process the file '{0}'.\n{1}".
904 format(table["input-file"], err))
908 dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row from the first CSV line; first column left-aligned.
911 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
912 for idx, item in enumerate(csv_lst[0]):
913 alignment = "left" if idx == 0 else "center"
914 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Two alternating shades per anomaly class for zebra striping.
918 colors = {"regression": ("#ffcccc", "#ff9999"),
919 "progression": ("#c6ecc6", "#9fdf9f"),
920 "normal": ("#e9f1fb", "#d4e4f7")}
921 for r_idx, row in enumerate(csv_lst[1:]):
925 color = "progression"
928 background = colors[color][r_idx % 2]
929 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
932 for c_idx, item in enumerate(row):
933 alignment = "left" if c_idx == 0 else "center"
934 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# Test-name cell becomes a hyperlink to its trending plot.
937 url = _generate_url("../trending/", item)
938 ref = ET.SubElement(td, "a", attrib=dict(href=url))
943 with open(table["output-file"], 'w') as html_file:
944 logging.info(" Writing file: '{0}'".format(table["output-file"]))
945 html_file.write(".. raw:: html\n\n\t")
946 html_file.write(ET.tostring(dashboard))
947 html_file.write("\n\t<p><br><br></p>\n")
949 logging.warning("The output file is not defined.")
# Builds the failed-tests summary: per test, counts FAILs within the last
# "window" builds and records when/where the most recent failure happened
# (time, VPP build, CSIT job build), then writes a CSV (sorted by failure
# count descending) and a pretty .txt.
# NOTE(review): iteritems() and dict.values() slicing (values()[-win_size:])
# are Python-2-only; both break on Python 3.
953 def table_failed_tests(table, input_data):
954 """Generate the table(s) with algorithm: table_failed_tests
955 specified in the specification file.
957 :param table: Table to generate.
958 :param input_data: Data to process.
959 :type table: pandas.Series
960 :type input_data: InputData
963 logging.info(" Generating the table {0} ...".
964 format(table.get("title", "")))
967 logging.info(" Creating the data set for the {0} '{1}'.".
968 format(table.get("type", ""), table.get("title", "")))
969 data = input_data.filter_data(table, continue_on_error=True)
971 # Prepare the header of the tables
972 header = ["Test Case",
974 "Last Failure [Time]",
975 "Last Failure [VPP-Build-Id]",
976 "Last Failure [CSIT-Job-Build-Id]"]
978 # Generate the data for the table according to the model in the table
# Collect per-build (status, generated-time, version, ...) tuples per test.
981 for job, builds in table["data"].items():
984 for tst_name, tst_data in data[job][build].iteritems():
985 if tst_name.lower() in table["ignore-list"]:
987 if tbl_dict.get(tst_name, None) is None:
988 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
990 tbl_dict[tst_name] = {"name": name,
991 "data": OrderedDict()}
993 tbl_dict[tst_name]["data"][build] = (
995 input_data.metadata(job, build).get("generated", ""),
996 input_data.metadata(job, build).get("version", ""),
998 except (TypeError, KeyError):
999 pass # No data in output.xml for this test
# Scan the last win_size builds; the last FAIL seen wins the "last failure"
# fields since iteration is oldest-to-newest.
1002 for tst_data in tbl_dict.values():
1003 win_size = min(len(tst_data["data"]), table["window"])
1005 for val in tst_data["data"].values()[-win_size:]:
1006 if val[0] == "FAIL":
1008 fails_last_date = val[1]
1009 fails_last_vpp = val[2]
1010 fails_last_csit = val[3]
1012 tbl_lst.append([tst_data["name"],
1016 "mrr-daily-build-{0}".format(fails_last_csit)])
1018 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
# Bucket rows by failure count (item[1]), highest first.
1020 for nrf in range(table["window"], -1, -1):
1021 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1022 tbl_sorted.extend(tbl_fails)
1023 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1025 logging.info(" Writing file: '{0}'".format(file_name))
1026 with open(file_name, "w") as file_handler:
1027 file_handler.write(",".join(header) + "\n")
1028 for test in tbl_sorted:
1029 file_handler.write(",".join([str(item) for item in test]) + '\n')
1031 txt_file_name = "{0}.txt".format(table["output-file"])
1032 logging.info(" Writing file: '{0}'".format(txt_file_name))
1033 convert_csv_to_pretty_txt(file_name, txt_file_name)
# HTML twin of table_failed_tests: reads the failed-tests CSV, renders it as an
# ElementTree HTML table with simple zebra striping (no per-class colors here,
# unlike the dashboard HTML above), links test names to trending plots, and
# wraps the markup in an RST ".. raw:: html" block.
# NOTE(review): 'rb' + csv.reader is Python-2-only; Py3 csv needs text mode.
1036 def table_failed_tests_html(table, input_data):
1037 """Generate the table(s) with algorithm: table_failed_tests_html
1038 specified in the specification file.
1040 :param table: Table to generate.
1041 :param input_data: Data to process.
1042 :type table: pandas.Series
1043 :type input_data: InputData
1046 logging.info(" Generating the table {0} ...".
1047 format(table.get("title", "")))
1050 with open(table["input-file"], 'rb') as csv_file:
1051 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1052 csv_lst = [item for item in csv_content]
1054 logging.warning("The input file is not defined.")
1056 except csv.Error as err:
1057 logging.warning("Not possible to process the file '{0}'.\n{1}".
1058 format(table["input-file"], err))
1062 failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row from the first CSV line; first column left-aligned.
1065 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
1066 for idx, item in enumerate(csv_lst[0]):
1067 alignment = "left" if idx == 0 else "center"
1068 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Two alternating row shades.
1072 colors = ("#e9f1fb", "#d4e4f7")
1073 for r_idx, row in enumerate(csv_lst[1:]):
1074 background = colors[r_idx % 2]
1075 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
1078 for c_idx, item in enumerate(row):
1079 alignment = "left" if c_idx == 0 else "center"
1080 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# Test-name cell becomes a hyperlink to its trending plot.
1083 url = _generate_url("../trending/", item)
1084 ref = ET.SubElement(td, "a", attrib=dict(href=url))
1089 with open(table["output-file"], 'w') as html_file:
1090 logging.info(" Writing file: '{0}'".format(table["output-file"]))
1091 html_file.write(".. raw:: html\n\n\t")
1092 html_file.write(ET.tostring(failed_tests))
1093 html_file.write("\n\t<p><br><br></p>\n")
1095 logging.warning("The output file is not defined.")