1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
21 from string import replace
22 from collections import OrderedDict
23 from numpy import nan, isnan
24 from xml.etree import ElementTree as ET
26 from errors import PresentationError
27 from utils import mean, stdev, relative_change, classify_anomalies, \
28 convert_csv_to_pretty_txt
# Entry point: dispatch each table defined in the spec to its generator
# function, resolved by name from table["algorithm"].
31 def generate_tables(spec, data):
32 """Generate all tables specified in the specification file.
34 :param spec: Specification read from the specification file.
35 :param data: Data to process.
36 :type spec: Specification
40 logging.info("Generating the tables ...")
41 for table in spec.tables:
# NOTE(review): eval() on a spec-provided name executes arbitrary expressions
# if the spec file is untrusted; a dict of callables (or getattr on this
# module) would be safer. Only NameError is caught, so any other exception
# raised inside a table generator aborts the whole run — confirm intended.
43 eval(table["algorithm"])(table, data)
44 except NameError as err:
45 logging.error("Probably algorithm '{alg}' is not defined: {err}".
46 format(alg=table["algorithm"], err=repr(err)))
# Writes one CSV file per suite with detailed per-test results, taken from a
# single (first) job/build listed in table["data"].
50 def table_details(table, input_data):
51 """Generate the table(s) with algorithm: table_detailed_test_results
52 specified in the specification file.
54 :param table: Table to generate.
55 :param input_data: Data to process.
56 :type table: pandas.Series
57 :type input_data: InputData
60 logging.info("  Generating the table {0} ...".
61 format(table.get("title", "")))
64 logging.info("    Creating the data set for the {0} '{1}'.".
65 format(table.get("type", ""), table.get("title", "")))
66 data = input_data.filter_data(table)
68 # Prepare the header of the tables
70 for column in table["columns"]:
# CSV-escape embedded double quotes by doubling them.
71 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
73 # Generate the data for the table according to the model in the table
# NOTE(review): Python 2 only — dict.keys() is indexable here; Python 3 needs
# list(...)[0]. Only the first job and its first build are consumed.
75 job = table["data"].keys()[0]
76 build = str(table["data"][job][0])
78 suites = input_data.suites(job, build)
80 logging.error("  No data available. The table will not be generated.")
# NOTE(review): iteritems() is Python 2 only.
83 for suite_longname, suite in suites.iteritems():
85 suite_name = suite["name"]
87 for test in data[job][build].keys():
# Membership by substring: test belongs to a suite if its parent name is
# contained in the suite name — TODO confirm this cannot mis-match suites
# whose names share a common substring.
88 if data[job][build][test]["parent"] in suite_name:
90 for column in table["columns"]:
# column["data"] looks like "<cmd> <key>"; the key selects the test field.
92 col_data = str(data[job][build][test][column["data"].
93 split(" ")[1]]).replace('"', '""')
94 if column["data"].split(" ")[1] in ("vat-history",
# NOTE(review): string.replace() (module-level function) was removed in
# Python 3; col_data.replace(...) is the portable spelling.
96 col_data = replace(col_data, " |br| ", "",
98 col_data = " |prein| {0} |preout| ".\
100 row_lst.append('"{0}"'.format(col_data))
102 row_lst.append("No data")
103 table_lst.append(row_lst)
105 # Write the data to file
107 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
108 table["output-file-ext"])
109 logging.info("      Writing file: '{}'".format(file_name))
110 with open(file_name, "w") as file_handler:
111 file_handler.write(",".join(header) + "\n")
112 for item in table_lst:
113 file_handler.write(",".join(item) + "\n")
115 logging.info("  Done.")
# Same output shape as table_details, but operates on data merged across all
# jobs/builds (input_data.merge_data) instead of a single job/build.
118 def table_merged_details(table, input_data):
119 """Generate the table(s) with algorithm: table_merged_details
120 specified in the specification file.
122 :param table: Table to generate.
123 :param input_data: Data to process.
124 :type table: pandas.Series
125 :type input_data: InputData
128 logging.info("  Generating the table {0} ...".
129 format(table.get("title", "")))
132 logging.info("    Creating the data set for the {0} '{1}'.".
133 format(table.get("type", ""), table.get("title", "")))
134 data = input_data.filter_data(table)
135 data = input_data.merge_data(data)
# Stable ordering so output CSVs are deterministic across runs.
136 data.sort_index(inplace=True)
138 logging.info("    Creating the data set for the {0} '{1}'.".
139 format(table.get("type", ""), table.get("title", "")))
140 suites = input_data.filter_data(table, data_set="suites")
141 suites = input_data.merge_data(suites)
143 # Prepare the header of the tables
145 for column in table["columns"]:
# CSV-escape embedded double quotes by doubling them.
146 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
# NOTE(review): iteritems() is Python 2 only.
148 for _, suite in suites.iteritems():
150 suite_name = suite["name"]
152 for test in data.keys():
# Substring membership, same caveat as in table_details.
153 if data[test]["parent"] in suite_name:
155 for column in table["columns"]:
157 col_data = str(data[test][column["data"].
158 split(" ")[1]]).replace('"', '""')
159 if column["data"].split(" ")[1] in ("vat-history",
# NOTE(review): string.replace() module function — removed in Python 3.
161 col_data = replace(col_data, " |br| ", "",
163 col_data = " |prein| {0} |preout| ".\
# [:-5] drops a trailing " |br|" marker — TODO confirm it is always present.
164 format(col_data[:-5])
165 row_lst.append('"{0}"'.format(col_data))
167 row_lst.append("No data")
168 table_lst.append(row_lst)
170 # Write the data to file
172 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
173 table["output-file-ext"])
174 logging.info("      Writing file: '{}'".format(file_name))
175 with open(file_name, "w") as file_handler:
176 file_handler.write(",".join(header) + "\n")
177 for item in table_lst:
178 file_handler.write(",".join(item) + "\n")
180 logging.info("  Done.")
# Builds improvement tables from a CSV template plus measured data, then
# splits rows into ndr/pdr top/low files by a 10.0 relative-change threshold.
183 def table_performance_improvements(table, input_data):
184 """Generate the table(s) with algorithm: table_performance_improvements
185 specified in the specification file.
187 # FIXME: Not used now.
189 :param table: Table to generate.
190 :param input_data: Data to process.
191 :type table: pandas.Series
192 :type input_data: InputData
# Local helper: serialize one row (list of {"data": ...} items) as a CSV line.
195 def _write_line_to_file(file_handler, data):
196 """Write a line to the .csv file.
198 :param file_handler: File handler for the csv file. It must be open for
200 :param data: Item to be written to the file.
201 :type file_handler: BinaryIO
207 if isinstance(item["data"], str):
208 # Remove -?drdisc from the end
209 if item["data"].endswith("drdisc"):
# NOTE(review): endswith("drdisc") matches 6 chars but [:-8] strips 8
# (i.e. "-ndrdisc"/"-pdrdisc"); a name ending in bare "drdisc" would be
# over-truncated — TODO confirm inputs always carry the 8-char suffix.
210 item["data"] = item["data"][:-8]
211 line_lst.append(item["data"])
212 elif isinstance(item["data"], float):
213 line_lst.append("{:.1f}".format(item["data"]))
214 elif item["data"] is None:
216 file_handler.write(",".join(line_lst) + "\n")
218 logging.info("  Generating the table {0} ...".
219 format(table.get("title", "")))
# Read the template; without it the table cannot be produced.
222 file_name = table.get("template", None)
225 tmpl = _read_csv_template(file_name)
226 except PresentationError:
227 logging.error("  The template '{0}' does not exist. Skipping the "
228 "table.".format(file_name))
231 logging.error("The template is not defined. Skipping the table.")
235 logging.info("    Creating the data set for the {0} '{1}'.".
236 format(table.get("type", ""), table.get("title", "")))
237 data = input_data.filter_data(table)
239 # Prepare the header of the tables
241 for column in table["columns"]:
242 header.append(column["title"])
244 # Generate the data for the table according to the model in the table
247 for tmpl_item in tmpl:
249 for column in table["columns"]:
# column["data"] is "<cmd> <args...>"; cmd selects how the cell is computed.
250 cmd = column["data"].split(" ")[0]
251 args = column["data"].split(" ")[1:]
252 if cmd == "template":
254 val = float(tmpl_item[int(args[0])])
256 val = tmpl_item[int(args[0])]
257 tbl_item.append({"data": val})
263 for build in data[job]:
265 data_lst.append(float(build[tmpl_item[0]]
266 ["throughput"]["value"]))
267 except (KeyError, TypeError):
# NOTE(review): eval() of an operation name from the spec — same injection
# caveat as in generate_tables; a whitelist dict would be safer.
271 tbl_item.append({"data": (eval(operation)(data_lst)) /
274 tbl_item.append({"data": None})
275 elif cmd == "operation":
# Binary operation over two previously computed cells (by index).
278 nr1 = float(tbl_item[int(args[1])]["data"])
279 nr2 = float(tbl_item[int(args[2])]["data"])
281 tbl_item.append({"data": eval(operation)(nr1, nr2)})
283 tbl_item.append({"data": None})
284 except (IndexError, ValueError, TypeError):
285 logging.error("No data for {0}".format(tbl_item[0]["data"]))
286 tbl_item.append({"data": None})
289 logging.error("Not supported command {0}. Skipping the table.".
292 tbl_lst.append(tbl_item)
294 # Sort the table according to the relative change
# Last cell of each row is assumed to hold the relative change.
295 tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)
297 # Create the tables and write them to the files
299 "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
300 "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
301 "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
302 "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])
305 for file_name in file_names:
306 logging.info("    Writing the file '{0}'".format(file_name))
307 with open(file_name, "w") as file_handler:
308 file_handler.write(",".join(header) + "\n")
310 if isinstance(item[-1]["data"], float):
311 rel_change = round(item[-1]["data"], 1)
313 rel_change = item[-1]["data"]
# Route each row into exactly one of the four files: test type (ndr/pdr)
# crossed with whether the change clears the 10.0 threshold.
314 if "ndr_top" in file_name \
315 and "ndr" in item[0]["data"] \
316 and rel_change >= 10.0:
317 _write_line_to_file(file_handler, item)
318 elif "pdr_top" in file_name \
319 and "pdr" in item[0]["data"] \
320 and rel_change >= 10.0:
321 _write_line_to_file(file_handler, item)
322 elif "ndr_low" in file_name \
323 and "ndr" in item[0]["data"] \
324 and rel_change < 10.0:
325 _write_line_to_file(file_handler, item)
326 elif "pdr_low" in file_name \
327 and "pdr" in item[0]["data"] \
328 and rel_change < 10.0:
329 _write_line_to_file(file_handler, item)
331 logging.info("  Done.")
# Naive CSV reader for the improvements template (see FIXME: unused).
334 def _read_csv_template(file_name):
335 """Read the template from a .csv file.
337 # FIXME: Not used now.
339 :param file_name: Name / full path / relative path of the file to read.
341 :returns: Data from the template as list (lines) of lists (items on line).
343 :raises: PresentationError if it is not possible to read the file.
347 with open(file_name, 'r') as csv_file:
349 for line in csv_file:
# line[:-1] assumes every line ends with "\n"; a file without a trailing
# newline would lose its last character here. Plain split(",") also does
# not honor quoted commas — the csv module would be more robust.
350 tmpl_data.append(line[:-1].split(","))
# Wrap I/O failures in the project's own exception type.
352 except IOError as err:
353 raise PresentationError(str(err), level="ERROR")
# Compares reference vs. compare builds (optionally plus historical runs) per
# test case, emitting mean/stdev in Mpps and a relative-change column.
356 def table_performance_comparison(table, input_data):
357 """Generate the table(s) with algorithm: table_performance_comparison
358 specified in the specification file.
360 :param table: Table to generate.
361 :param input_data: Data to process.
362 :type table: pandas.Series
363 :type input_data: InputData
366 logging.info("  Generating the table {0} ...".
367 format(table.get("title", "")))
370 logging.info("    Creating the data set for the {0} '{1}'.".
371 format(table.get("type", ""), table.get("title", "")))
372 data = input_data.filter_data(table, continue_on_error=True)
374 # Prepare the header of the tables
376 header = ["Test case", ]
# Column label depends on the test kind being tabulated.
378 if table["include-tests"] == "MRR":
379 hdr_param = "Receive Rate"
381 hdr_param = "Throughput"
383 history = table.get("history", None)
387 ["{0} {1} [Mpps]".format(item["title"], hdr_param),
388 "{0} Stdev [Mpps]".format(item["title"])])
390 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
391 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
392 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
393 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
395 header_str = ",".join(header) + "\n"
396 except (AttributeError, KeyError) as err:
397 logging.error("The model is invalid, missing parameter: {0}".
401 # Prepare data to the table:
# Pass 1: collect reference-build samples keyed by a normalized test name.
403 for job, builds in table["reference"]["data"].items():
# NOTE(review): iteritems() is Python 2 only.
405 for tst_name, tst_data in data[job][str(build)].iteritems():
# Strip the test-type suffix so the same test matches across suites.
# Order matters: longer suffixes ("-ndrpdrdisc") must be stripped first.
406 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
407 replace("-ndrpdr", "").replace("-pdrdisc", "").\
408 replace("-ndrdisc", "").replace("-pdr", "").\
410 if tbl_dict.get(tst_name_mod, None) is None:
411 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
412 "-".join(tst_data["name"].
414 tbl_dict[tst_name_mod] = {"name": name,
418 # TODO: Re-work when NDRPDRDISC tests are not used
419 if table["include-tests"] == "MRR":
420 tbl_dict[tst_name_mod]["ref-data"]. \
421 append(tst_data["result"]["receive-rate"].avg)
422 elif table["include-tests"] == "PDR":
423 if tst_data["type"] == "PDR":
424 tbl_dict[tst_name_mod]["ref-data"]. \
425 append(tst_data["throughput"]["value"])
426 elif tst_data["type"] == "NDRPDR":
427 tbl_dict[tst_name_mod]["ref-data"].append(
428 tst_data["throughput"]["PDR"]["LOWER"])
429 elif table["include-tests"] == "NDR":
430 if tst_data["type"] == "NDR":
431 tbl_dict[tst_name_mod]["ref-data"]. \
432 append(tst_data["throughput"]["value"])
433 elif tst_data["type"] == "NDRPDR":
434 tbl_dict[tst_name_mod]["ref-data"].append(
435 tst_data["throughput"]["NDR"]["LOWER"])
439 pass # No data in output.xml for this test
# Pass 2: collect compare-build samples into the same per-test records.
441 for job, builds in table["compare"]["data"].items():
443 for tst_name, tst_data in data[job][str(build)].iteritems():
444 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
445 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
446 replace("-ndrdisc", "").replace("-pdr", ""). \
449 # TODO: Re-work when NDRPDRDISC tests are not used
450 if table["include-tests"] == "MRR":
451 tbl_dict[tst_name_mod]["cmp-data"]. \
452 append(tst_data["result"]["receive-rate"].avg)
453 elif table["include-tests"] == "PDR":
454 if tst_data["type"] == "PDR":
455 tbl_dict[tst_name_mod]["cmp-data"]. \
456 append(tst_data["throughput"]["value"])
457 elif tst_data["type"] == "NDRPDR":
458 tbl_dict[tst_name_mod]["cmp-data"].append(
459 tst_data["throughput"]["PDR"]["LOWER"])
460 elif table["include-tests"] == "NDR":
461 if tst_data["type"] == "NDR":
462 tbl_dict[tst_name_mod]["cmp-data"]. \
463 append(tst_data["throughput"]["value"])
464 elif tst_data["type"] == "NDRPDR":
465 tbl_dict[tst_name_mod]["cmp-data"].append(
466 tst_data["throughput"]["NDR"]["LOWER"])
# Drop tests that failed to yield compare data.
472 tbl_dict.pop(tst_name_mod, None)
# Pass 3 (optional): per-history-run samples, keyed by the run's title.
475 for job, builds in item["data"].items():
477 for tst_name, tst_data in data[job][str(build)].iteritems():
478 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
479 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
480 replace("-ndrdisc", "").replace("-pdr", ""). \
482 if tbl_dict.get(tst_name_mod, None) is None:
484 if tbl_dict[tst_name_mod].get("history", None) is None:
485 tbl_dict[tst_name_mod]["history"] = OrderedDict()
486 if tbl_dict[tst_name_mod]["history"].get(item["title"],
488 tbl_dict[tst_name_mod]["history"][item["title"]] = \
491 # TODO: Re-work when NDRPDRDISC tests are not used
492 if table["include-tests"] == "MRR":
493 tbl_dict[tst_name_mod]["history"][item["title"
494 ]].append(tst_data["result"]["receive-rate"].
496 elif table["include-tests"] == "PDR":
497 if tst_data["type"] == "PDR":
498 tbl_dict[tst_name_mod]["history"][
500 append(tst_data["throughput"]["value"])
501 elif tst_data["type"] == "NDRPDR":
502 tbl_dict[tst_name_mod]["history"][item[
503 "title"]].append(tst_data["throughput"][
505 elif table["include-tests"] == "NDR":
506 if tst_data["type"] == "NDR":
507 tbl_dict[tst_name_mod]["history"][
509 append(tst_data["throughput"]["value"])
510 elif tst_data["type"] == "NDRPDR":
511 tbl_dict[tst_name_mod]["history"][item[
512 "title"]].append(tst_data["throughput"][
516 except (TypeError, KeyError):
# Assemble output rows: name, [history mean/stdev...], ref mean/stdev,
# cmp mean/stdev, relative change. Values scaled from pps to Mpps (/1e6).
520 for tst_name in tbl_dict.keys():
521 item = [tbl_dict[tst_name]["name"], ]
523 if tbl_dict[tst_name].get("history", None) is not None:
524 for hist_data in tbl_dict[tst_name]["history"].values():
526 item.append(round(mean(hist_data) / 1000000, 2))
527 item.append(round(stdev(hist_data) / 1000000, 2))
529 item.extend([None, None])
531 item.extend([None, None])
532 data_t = tbl_dict[tst_name]["ref-data"]
534 item.append(round(mean(data_t) / 1000000, 2))
535 item.append(round(stdev(data_t) / 1000000, 2))
537 item.extend([None, None])
538 data_t = tbl_dict[tst_name]["cmp-data"]
540 item.append(round(mean(data_t) / 1000000, 2))
541 item.append(round(stdev(data_t) / 1000000, 2))
543 item.extend([None, None])
# item[-4] = ref mean, item[-2] = cmp mean (positional — fragile if the
# row layout above changes). Guard against missing data and div-by-zero.
544 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
545 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
# Keep only complete rows (a relative change was appended).
546 if len(item) == len(header):
549 # Sort the table according to the relative change
550 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
552 # Generate csv tables:
553 csv_file = "{0}.csv".format(table["output-file"])
554 with open(csv_file, "w") as file_handler:
555 file_handler.write(header_str)
557 file_handler.write(",".join([str(item) for item in test]) + "\n")
559 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
# Builds the trending dashboard CSV: per test, the latest trend value plus
# short/long-term relative changes and regression/progression counts.
562 def table_performance_trending_dashboard(table, input_data):
563 """Generate the table(s) with algorithm:
564 table_performance_trending_dashboard
565 specified in the specification file.
567 :param table: Table to generate.
568 :param input_data: Data to process.
569 :type table: pandas.Series
570 :type input_data: InputData
573 logging.info("  Generating the table {0} ...".
574 format(table.get("title", "")))
577 logging.info("    Creating the data set for the {0} '{1}'.".
578 format(table.get("type", ""), table.get("title", "")))
579 data = input_data.filter_data(table, continue_on_error=True)
581 # Prepare the header of the tables
582 header = ["Test Case",
584 "Short-Term Change [%]",
585 "Long-Term Change [%]",
589 header_str = ",".join(header) + "\n"
591 # Prepare data to the table:
593 for job, builds in table["data"].items():
# NOTE(review): iteritems() is Python 2 only.
595 for tst_name, tst_data in data[job][str(build)].iteritems():
# Spec-driven skip list of tests excluded from the dashboard.
596 if tst_name.lower() in table["ignore-list"]:
598 if tbl_dict.get(tst_name, None) is None:
599 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
601 tbl_dict[tst_name] = {"name": name,
602 "data": OrderedDict()}
604 tbl_dict[tst_name]["data"][str(build)] = \
605 tst_data["result"]["receive-rate"]
606 except (TypeError, KeyError):
607 pass # No data in output.xml for this test
610 for tst_name in tbl_dict.keys():
611 data_t = tbl_dict[tst_name]["data"]
# classify_anomalies labels each sample (regression/progression/normal)
# and yields the per-sample trend averages used below.
615 classification_lst, avgs = classify_anomalies(data_t)
# Clamp windows so short series (fewer builds than the window) still work.
617 win_size = min(len(data_t), table["window"])
618 long_win_size = min(len(data_t), table["long-trend-window"])
622 [x for x in avgs[-long_win_size:-win_size]
627 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Guard against NaN averages and division by zero before computing the
# percentage changes; NaN propagates to a '-' cell below.
629 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
630 rel_change_last = nan
632 rel_change_last = round(
633 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
635 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
636 rel_change_long = nan
638 rel_change_long = round(
639 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
641 if classification_lst:
642 if isnan(rel_change_last) and isnan(rel_change_long):
645 [tbl_dict[tst_name]["name"],
646 '-' if isnan(last_avg) else
647 round(last_avg / 1000000, 2),
648 '-' if isnan(rel_change_last) else rel_change_last,
649 '-' if isnan(rel_change_long) else rel_change_long,
650 classification_lst[-win_size:].count("regression"),
651 classification_lst[-win_size:].count("progression")])
# Alphabetical pre-sort, then bucket by regression count (desc) and, inside
# each bucket, progression count (desc) and short-term change (asc).
653 tbl_lst.sort(key=lambda rel: rel[0])
656 for nrr in range(table["window"], -1, -1):
657 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
658 for nrp in range(table["window"], -1, -1):
659 tbl_out = [item for item in tbl_reg if item[5] == nrp]
660 tbl_out.sort(key=lambda rel: rel[2])
661 tbl_sorted.extend(tbl_out)
663 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
665 logging.info("    Writing file: '{0}'".format(file_name))
666 with open(file_name, "w") as file_handler:
667 file_handler.write(header_str)
668 for test in tbl_sorted:
669 file_handler.write(",".join([str(item) for item in test]) + '\n')
671 txt_file_name = "{0}.txt".format(table["output-file"])
672 logging.info("    Writing file: '{0}'".format(txt_file_name))
673 convert_csv_to_pretty_txt(file_name, txt_file_name)
# Maps a test-case name to its trending-plot URL by keyword matching on the
# name: target page (file_name), optional "-features" suffix, and an anchor
# derived from NIC / frame size / thread-core keywords.
676 def _generate_url(base, test_name):
677 """Generate URL to a trending plot from the name of the test case.
679 :param base: The base part of URL common to all test cases.
680 :param test_name: The name of the test case.
683 :returns: The URL to the plot with the trending data for the given test
# Branch order matters: e.g. "vhost" must be tested before the generic
# "ip4base"/"ip4scale" branch, since vhost test names can contain "ip4base".
693 if "lbdpdk" in test_name or "lbvpp" in test_name:
694 file_name = "link_bonding.html"
696 elif "testpmd" in test_name or "l3fwd" in test_name:
697 file_name = "dpdk.html"
699 elif "memif" in test_name:
700 file_name = "container_memif.html"
702 elif "srv6" in test_name:
703 file_name = "srv6.html"
705 elif "vhost" in test_name:
706 if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
707 file_name = "vm_vhost_l2.html"
708 elif "ip4base" in test_name:
709 file_name = "vm_vhost_ip4.html"
711 elif "ipsec" in test_name:
712 file_name = "ipsec.html"
714 elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
715 file_name = "ip4_tunnels.html"
717 elif "ip4base" in test_name or "ip4scale" in test_name:
718 file_name = "ip4.html"
719 if "iacl" in test_name or "snat" in test_name or "cop" in test_name:
720 feature = "-features"
722 elif "ip6base" in test_name or "ip6scale" in test_name:
723 file_name = "ip6.html"
725 elif "l2xcbase" in test_name or "l2xcscale" in test_name \
726 or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
727 or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
728 file_name = "l2.html"
729 if "iacl" in test_name:
730 feature = "-features"
# NIC model selects the anchor prefix (assignment lines not shown here).
732 if "x520" in test_name:
734 elif "x710" in test_name:
736 elif "xl710" in test_name:
# Frame size extends the anchor.
739 if "64b" in test_name:
741 elif "78b" in test_name:
743 elif "imix" in test_name:
745 elif "9000b" in test_name:
747 elif "1518" in test_name:
# Thread/core combination completes the anchor.
750 if "1t1c" in test_name:
752 elif "2t2c" in test_name:
754 elif "4t4c" in test_name:
757 return url + file_name + anchor + feature
# Renders the dashboard CSV (produced by table_performance_trending_dashboard)
# as an HTML table embedded in a reStructuredText ".. raw:: html" block.
760 def table_performance_trending_dashboard_html(table, input_data):
761 """Generate the table(s) with algorithm:
762 table_performance_trending_dashboard_html specified in the specification
765 :param table: Table to generate.
766 :param input_data: Data to process.
767 :type table: pandas.Series
768 :type input_data: InputData
771 logging.info("  Generating the table {0} ...".
772 format(table.get("title", "")))
# NOTE(review): opening a csv.reader source in 'rb' is the Python 2 idiom;
# Python 3 needs text mode with newline="".
775 with open(table["input-file"], 'rb') as csv_file:
776 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
777 csv_lst = [item for item in csv_content]
779 logging.warning("The input file is not defined.")
781 except csv.Error as err:
782 logging.warning("Not possible to process the file '{0}'.\n{1}".
783 format(table["input-file"], err))
787 dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row: first CSV row becomes <th> cells, first column left-aligned.
790 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
791 for idx, item in enumerate(csv_lst[0]):
792 alignment = "left" if idx == 0 else "center"
793 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Row colors: two alternating shades per classification for zebra striping.
797 colors = {"regression": ("#ffcccc", "#ff9999"),
798 "progression": ("#c6ecc6", "#9fdf9f"),
799 "normal": ("#e9f1fb", "#d4e4f7")}
800 for r_idx, row in enumerate(csv_lst[1:]):
804 color = "progression"
807 background = colors[color][r_idx % 2]
808 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
811 for c_idx, item in enumerate(row):
812 alignment = "left" if c_idx == 0 else "center"
813 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# Test-name cell links to its trending plot.
816 url = _generate_url("../trending/", item)
817 ref = ET.SubElement(td, "a", attrib=dict(href=url))
822 with open(table["output-file"], 'w') as html_file:
823 logging.info("    Writing file: '{0}'".format(table["output-file"]))
824 html_file.write(".. raw:: html\n\n\t")
825 html_file.write(ET.tostring(dashboard))
826 html_file.write("\n\t<p><br><br></p>\n")
828 logging.warning("The output file is not defined.")
# Builds the failed-tests CSV: per test, failure count within the window plus
# details (time, VPP build, CSIT build) of the most recent failure.
832 def table_failed_tests(table, input_data):
833 """Generate the table(s) with algorithm: table_failed_tests
834 specified in the specification file.
836 :param table: Table to generate.
837 :param input_data: Data to process.
838 :type table: pandas.Series
839 :type input_data: InputData
842 logging.info("  Generating the table {0} ...".
843 format(table.get("title", "")))
846 logging.info("    Creating the data set for the {0} '{1}'.".
847 format(table.get("type", ""), table.get("title", "")))
848 data = input_data.filter_data(table, continue_on_error=True)
850 # Prepare the header of the tables
851 header = ["Test Case",
853 "Last Failure [Time]",
854 "Last Failure [VPP-Build-Id]",
855 "Last Failure [CSIT-Job-Build-Id]"]
857 # Generate the data for the table according to the model in the table
860 for job, builds in table["data"].items():
# NOTE(review): iteritems() is Python 2 only.
863 for tst_name, tst_data in data[job][build].iteritems():
864 if tst_name.lower() in table["ignore-list"]:
866 if tbl_dict.get(tst_name, None) is None:
867 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
869 tbl_dict[tst_name] = {"name": name,
870 "data": OrderedDict()}
# Per-build tuple: (status?, generated time, VPP version, CSIT build) —
# indices 1..3 are consumed below; element 0 not visible here.
872 tbl_dict[tst_name]["data"][build] = (
874 input_data.metadata(job, build).get("generated", ""),
875 input_data.metadata(job, build).get("version", ""),
877 except (TypeError, KeyError):
878 pass # No data in output.xml for this test
881 for tst_data in tbl_dict.values():
882 win_size = min(len(tst_data["data"]), table["window"])
# NOTE(review): values()[-win_size:] relies on Python 2 (list) semantics
# and on OrderedDict insertion order matching build order.
884 for val in tst_data["data"].values()[-win_size:]:
# Keep the details of the newest failure seen in the window.
887 fails_last_date = val[1]
888 fails_last_vpp = val[2]
889 fails_last_csit = val[3]
891 tbl_lst.append([tst_data["name"],
895 "mrr-daily-build-{0}".format(fails_last_csit)])
# Sort by last-failure time (col 2), then bucket by failure count (col 1),
# most failures first.
897 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
899 for nrf in range(table["window"], -1, -1):
900 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
901 tbl_sorted.extend(tbl_fails)
902 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
904 logging.info("    Writing file: '{0}'".format(file_name))
905 with open(file_name, "w") as file_handler:
906 file_handler.write(",".join(header) + "\n")
907 for test in tbl_sorted:
908 file_handler.write(",".join([str(item) for item in test]) + '\n')
910 txt_file_name = "{0}.txt".format(table["output-file"])
911 logging.info("    Writing file: '{0}'".format(txt_file_name))
912 convert_csv_to_pretty_txt(file_name, txt_file_name)
# Renders the failed-tests CSV as an HTML table inside a reStructuredText
# ".. raw:: html" block (same pattern as the dashboard HTML generator).
915 def table_failed_tests_html(table, input_data):
916 """Generate the table(s) with algorithm: table_failed_tests_html
917 specified in the specification file.
919 :param table: Table to generate.
920 :param input_data: Data to process.
921 :type table: pandas.Series
922 :type input_data: InputData
925 logging.info("  Generating the table {0} ...".
926 format(table.get("title", "")))
# NOTE(review): 'rb' with csv.reader is Python 2 only; Python 3 needs text
# mode with newline="".
929 with open(table["input-file"], 'rb') as csv_file:
930 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
931 csv_lst = [item for item in csv_content]
933 logging.warning("The input file is not defined.")
935 except csv.Error as err:
936 logging.warning("Not possible to process the file '{0}'.\n{1}".
937 format(table["input-file"], err))
941 failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row from the first CSV row; first column left-aligned.
944 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
945 for idx, item in enumerate(csv_lst[0]):
946 alignment = "left" if idx == 0 else "center"
947 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Two alternating background shades for zebra striping.
951 colors = ("#e9f1fb", "#d4e4f7")
952 for r_idx, row in enumerate(csv_lst[1:]):
953 background = colors[r_idx % 2]
954 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
957 for c_idx, item in enumerate(row):
958 alignment = "left" if c_idx == 0 else "center"
959 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# Test-name cell links to its trending plot.
962 url = _generate_url("../trending/", item)
963 ref = ET.SubElement(td, "a", attrib=dict(href=url))
968 with open(table["output-file"], 'w') as html_file:
969 logging.info("    Writing file: '{0}'".format(table["output-file"]))
970 html_file.write(".. raw:: html\n\n\t")
971 html_file.write(ET.tostring(failed_tests))
972 html_file.write("\n\t<p><br><br></p>\n")
974 logging.warning("The output file is not defined.")