1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
21 from string import replace
22 from collections import OrderedDict
23 from numpy import nan, isnan
24 from xml.etree import ElementTree as ET
26 from errors import PresentationError
27 from utils import mean, stdev, relative_change, classify_anomalies, \
28 convert_csv_to_pretty_txt
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        try:
            # Resolve the algorithm by an explicit module-level lookup
            # instead of eval() so a spec entry can only select one of the
            # functions defined in this module, never run arbitrary code.
            algorithm = globals().get(table["algorithm"], None)
            if algorithm is None:
                raise NameError(table["algorithm"])
            algorithm(table, data)
        except NameError as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
    logging.info("Done.")
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables; values are csv-quoted, embedded
    # double quotes are escaped by doubling them.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification. list(...)[0] instead of .keys()[0] keeps this working
    # on both Python 2 and Python 3.
    job = list(table["data"].keys())[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be "
                      "generated.")
        return

    for _, suite in suites.items():
        # Generate one output table per suite.
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # str.replace instead of the deprecated (and
                            # Python-3-removed) string.replace() function;
                            # remove only the first " |br| " separator.
                            col_data = col_data.replace(" |br| ", "", 1)
                            # Drop the trailing " |br|" before wrapping the
                            # cell in the |prein| / |preout| markers.
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data: merge test data from all builds and sort it.
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; csv-quote titles, escape embedded
    # double quotes by doubling them.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # .items() (works on Python 2 and 3) instead of .iteritems().
    for _, suite in suites.items():
        # Generate one output table per suite.
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # str.replace instead of the deprecated (and
                            # Python-3-removed) string.replace() function;
                            # remove only the first " |br| " separator.
                            col_data = col_data.replace(" |br| ", "", 1)
                            # Drop the trailing " |br|" before wrapping the
                            # cell in the |prein| / |preout| markers.
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
def table_performance_improvements(table, input_data):
    """Generate the table(s) with algorithm: table_performance_improvements
    specified in the specification file.

    # FIXME: Not used now.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    def _write_line_to_file(file_handler, data):
        """Write one row of the table to the .csv file.

        :param file_handler: File handler for the csv file. It must be open
            for writing text.
        :param data: Items (cells) to be written to the file.
        :type file_handler: BinaryIO
        :type data: list
        """

        line_lst = list()
        for item in data:
            if isinstance(item["data"], str):
                # Remove the trailing "-[np]drdisc" suffix from test names.
                if item["data"].endswith("drdisc"):
                    item["data"] = item["data"][:-8]
                line_lst.append(item["data"])
            elif isinstance(item["data"], float):
                line_lst.append("{:.1f}".format(item["data"]))
            elif item["data"] is None:
                line_lst.append("")
            else:
                # Robustness fix: previously cells of any other type (e.g.
                # int) were silently dropped, shifting all following columns.
                line_lst.append(str(item["data"]))
        file_handler.write(",".join(line_lst) + "\n")

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the template
    file_name = table.get("template", None)
    if file_name:
        try:
            tmpl = _read_csv_template(file_name)
        except PresentationError:
            logging.error("  The template '{0}' does not exist. Skipping the "
                          "table.".format(file_name))
            return None
    else:
        logging.error("The template is not defined. Skipping the table.")
        return None

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append(column["title"])

    # Generate the data for the table according to the model in the table
    # specification
    tbl_lst = list()
    for tmpl_item in tmpl:
        tbl_item = list()
        for column in table["columns"]:
            cmd = column["data"].split(" ")[0]
            args = column["data"].split(" ")[1:]
            if cmd == "template":
                try:
                    val = float(tmpl_item[int(args[0])])
                except ValueError:
                    val = tmpl_item[int(args[0])]
                tbl_item.append({"data": val})
            elif cmd == "data":
                jobs = args[0:-1]
                operation = args[-1]
                data_lst = list()
                for job in jobs:
                    for build in data[job]:
                        try:
                            data_lst.append(float(build[tmpl_item[0]]
                                                  ["throughput"]["value"]))
                        except (KeyError, TypeError):
                            # No data for this test in this build; skip it.
                            pass
                if data_lst:
                    # NOTE(review): eval() resolves the aggregation name
                    # (e.g. "mean") from the specification file. The spec is
                    # repository-controlled input, but an explicit lookup
                    # table of allowed operations would be safer.
                    tbl_item.append({"data": (eval(operation)(data_lst)) /
                                             1000000})
                else:
                    tbl_item.append({"data": None})
            elif cmd == "operation":
                operation = args[0]
                try:
                    nr1 = float(tbl_item[int(args[1])]["data"])
                    nr2 = float(tbl_item[int(args[2])]["data"])
                    if nr1 and nr2:
                        tbl_item.append({"data": eval(operation)(nr1, nr2)})
                    else:
                        tbl_item.append({"data": None})
                except (IndexError, ValueError, TypeError):
                    logging.error("No data for {0}".
                                  format(tbl_item[0]["data"]))
                    tbl_item.append({"data": None})
            else:
                logging.error("Not supported command {0}. Skipping the table.".
                              format(cmd))
                return None
        tbl_lst.append(tbl_item)

    # Sort the table according to the relative change.
    # NOTE(review): rows with None in the last cell rely on Python 2's
    # None-vs-float ordering; confirm before porting to Python 3.
    tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)

    # Create the tables and write them to the files
    file_names = [
        "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])
    ]

    for file_name in file_names:
        logging.info("    Writing the file '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in tbl_lst:
                if isinstance(item[-1]["data"], float):
                    rel_change = round(item[-1]["data"], 1)
                else:
                    rel_change = item[-1]["data"]
                # "top" files get rows with change >= 10 %, "low" files the
                # rest; ndr/pdr rows are split by the test-name substring.
                if "ndr_top" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_top" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "ndr_low" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_low" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)

    logging.info("  Done.")
334 def _read_csv_template(file_name):
335 """Read the template from a .csv file.
337 :param file_name: Name / full path / relative path of the file to read.
339 :returns: Data from the template as list (lines) of lists (items on line).
341 :raises: PresentationError if it is not possible to read the file.
345 with open(file_name, 'r') as csv_file:
347 for line in csv_file:
348 tmpl_data.append(line[:-1].split(","))
350 except IOError as err:
351 raise PresentationError(str(err), level="ERROR")
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} Throughput [Mpps]".format(item["title"]),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} Throughput [Mpps]".format(table["compare"]["title"]),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Change [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    # .items() (works on Python 2 and 3) replaces .iteritems() throughout.
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name,
                                          "ref-data": list(),
                                          "cmp-data": list()}
                try:
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["throughput"]["value"])
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                try:
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["throughput"]["value"])
                except KeyError:
                    pass  # Test not present in the reference set
                except TypeError:
                    # No usable data at all; drop the test entirely.
                    tbl_dict.pop(tst_name, None)

    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in \
                            data[job][str(build)].items():
                        if tbl_dict.get(tst_name, None) is None:
                            continue
                        if tbl_dict[tst_name].get("history", None) is None:
                            tbl_dict[tst_name]["history"] = OrderedDict()
                        if tbl_dict[tst_name]["history"].\
                                get(item["title"], None) is None:
                            tbl_dict[tst_name]["history"][item["title"]] = \
                                list()
                        try:
                            tbl_dict[tst_name]["history"][item["title"]].\
                                append(tst_data["throughput"]["value"])
                        except (TypeError, KeyError):
                            pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # item[-4] is the reference mean, item[-2] the compare mean.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]),
                                            float(item[-2]))))
        # Keep only complete rows (rows whose change could be computed).
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate tables:
    # All tests in csv:
    tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
                 ]
    for file_name in tbl_names:
        logging.info("      Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            for test in tbl_lst:
                # A test row matches exactly one file: ndr/pdr marker and
                # cores are both taken from the file name. The truncation
                # below (dropping the last name segment) deliberately
                # mutates the row so the shortened name is kept for the
                # txt/top/bottom outputs generated from these csv files.
                if (file_name.split("-")[-3] in test[0] and  # NDR vs PDR
                        file_name.split("-")[-2] in test[0]):  # cores
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(
                        ",".join([str(item) for item in test]) + "\n")

    # All tests in txt:
    tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
                     "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-pdr-4t4c-full.txt".format(table["output-file"])
                     ]

    for i, txt_name in enumerate(tbl_names_txt):
        logging.info("      Writing file: '{0}'".format(txt_name))
        convert_csv_to_pretty_txt(tbl_names[i], txt_name)

    # Selected tests in csv:
    input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
        lines = list()
        for line in in_file:
            lines.append(line)

    output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        # lines[-1:0:-1] walks the data rows from worst change upwards,
        # skipping the header at index 0.
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
        lines = list()
        for line in in_file:
            lines.append(line)

    output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)

    output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
                break
            out_file.write(line)
def table_performance_comparison_mrr(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison_mrr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case",
                  "{0} Throughput [Mpps]".format(table["reference"]["title"]),
                  "{0} stdev [Mpps]".format(table["reference"]["title"]),
                  "{0} Throughput [Mpps]".format(table["compare"]["title"]),
                  "{0} stdev [Mpps]".format(table["compare"]["title"]),
                  "Change [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    # .items() (works on Python 2 and 3) replaces .iteritems() throughout.
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name,
                                          "ref-data": list(),
                                          "cmp-data": list()}
                try:
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["result"]["receive-rate"].avg)
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                try:
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["result"]["receive-rate"].avg)
                except KeyError:
                    pass  # Test not present in the reference set
                except TypeError:
                    # No usable data at all; drop the test entirely.
                    tbl_dict.pop(tst_name, None)

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # item[1] is the reference mean, item[3] the compare mean.
        if item[1] is not None and item[3] is not None and item[1] != 0:
            item.append(int(relative_change(float(item[1]),
                                            float(item[3]))))
        # Keep only complete rows (rows whose change could be computed).
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate tables:
    # All tests in csv:
    tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-2t2c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-4t4c-full{1}".format(table["output-file"],
                                           table["output-file-ext"])
                 ]
    for file_name in tbl_names:
        logging.info("      Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            for test in tbl_lst:
                # Each row matches exactly one file (by core count); the
                # truncation below deliberately mutates the row so the
                # shortened name is kept for the txt outputs.
                if file_name.split("-")[-2] in test[0]:  # cores
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(
                        ",".join([str(item) for item in test]) + "\n")

    # All tests in txt:
    tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
                     "{0}-2t2c-full.txt".format(table["output-file"]),
                     "{0}-4t4c-full.txt".format(table["output-file"])
                     ]

    for i, txt_name in enumerate(tbl_names_txt):
        logging.info("      Writing file: '{0}'".format(txt_name))
        convert_csv_to_pretty_txt(tbl_names[i], txt_name)
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    # .items() (works on Python 2 and 3) replaces .iteritems().
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name,
                                          "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        if len(data_t) < 2:
            # At least two samples are needed to classify a trend.
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # The long-term window has no valid samples.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 '-' if isnan(last_avg) else
                 round(last_avg / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order the rows: most regressions first, then most progressions,
    # then by short-term change within each group.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            # NOTE(review): item[2] may be the string '-' mixed with floats;
            # this sort relies on Python 2 cross-type ordering.
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"],
                                table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
798 def _generate_url(base, test_name):
799 """Generate URL to a trending plot from the name of the test case.
801 :param base: The base part of URL common to all test cases.
802 :param test_name: The name of the test case.
805 :returns: The URL to the plot with the trending data for the given test
815 if "lbdpdk" in test_name or "lbvpp" in test_name:
816 file_name = "link_bonding.html"
818 elif "testpmd" in test_name or "l3fwd" in test_name:
819 file_name = "dpdk.html"
821 elif "memif" in test_name:
822 file_name = "container_memif.html"
824 elif "srv6" in test_name:
825 file_name = "srv6.html"
827 elif "vhost" in test_name:
828 if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
829 file_name = "vm_vhost_l2.html"
830 elif "ip4base" in test_name:
831 file_name = "vm_vhost_ip4.html"
833 elif "ipsec" in test_name:
834 file_name = "ipsec.html"
836 elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
837 file_name = "ip4_tunnels.html"
839 elif "ip4base" in test_name or "ip4scale" in test_name:
840 file_name = "ip4.html"
841 if "iacl" in test_name or "snat" in test_name or "cop" in test_name:
842 feature = "-features"
844 elif "ip6base" in test_name or "ip6scale" in test_name:
845 file_name = "ip6.html"
847 elif "l2xcbase" in test_name or "l2xcscale" in test_name \
848 or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
849 or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
850 file_name = "l2.html"
851 if "iacl" in test_name:
852 feature = "-features"
854 if "x520" in test_name:
856 elif "x710" in test_name:
858 elif "xl710" in test_name:
861 if "64b" in test_name:
863 elif "78b" in test_name:
865 elif "imix" in test_name:
867 elif "9000b" in test_name:
869 elif "1518" in test_name:
872 if "1t1c" in test_name:
874 elif "2t2c" in test_name:
876 elif "4t4c" in test_name:
879 return url + file_name + anchor + feature
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows:
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 hold the regression / progression counts; any
        # non-zero count selects the row color, regressions win.
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        else:
            color = "normal"
        # Alternate shades for even/odd rows.
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # The first column is the test name: link it to its trending
            # plot.
            if c_idx == 0:
                url = _generate_url("../trending/", item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".
                         format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification. .items() (works on Python 2 and 3) replaces
    # .iteritems().
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name,
                                          "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][build] = (
                        tst_data["status"],
                        input_data.metadata(job, build).get("generated", ""),
                        input_data.metadata(job, build).get("version", ""),
                        build)
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_data in tbl_dict.values():
        win_size = min(len(tst_data["data"]), table["window"])
        fails_nr = 0
        # list(...) around .values() keeps the slice working on Python 3,
        # where dict views are not subscriptable.
        for val in list(tst_data["data"].values())[-win_size:]:
            if val[0] == "FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    # Group by number of failures, most failures first.
    tbl_sorted = list()
    for nrf in range(table["window"], -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)
    file_name = "{0}{1}".format(table["output-file"],
                                table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows: alternate between two background shades.
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr",
                           attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # The first column is the test name: link it to its trending
            # plot.
            if c_idx == 0:
                url = _generate_url("../trending/", item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".
                         format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return