1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30 convert_csv_to_pretty_txt
# Regex extracting the NIC token (e.g. "10ge2p1x520") from a test's
# "parent" suite name; used by the dashboard/failed-tests generators
# below to build row names.
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
# Dispatch each table declared in the specification to its generator
# function in this module, selected by name.
# NOTE(review): this listing is elided -- the embedded original line
# numbers are non-contiguous, so some statements (e.g. the "try:" that
# pairs with the "except" below) are not visible here.
36 def generate_tables(spec, data):
37     """Generate all tables specified in the specification file.
39     :param spec: Specification read from the specification file.
40     :param data: Data to process.
41     :type spec: Specification
45     logging.info("Generating the tables ...")
46     for table in spec.tables:
# "algorithm" names a generator function in this module; eval()
# resolves it, and a misspelled name surfaces as the NameError handled
# below. NOTE(review): eval() executes spec-provided text -- safe only
# if the specification file is trusted input.
48             eval(table["algorithm"])(table, data)
49         except NameError as err:
50             logging.error("Probably algorithm '{alg}' is not defined: {err}".
51                           format(alg=table["algorithm"], err=repr(err)))
# Write one CSV file of detailed test results per suite, for a single
# job/build taken from the table specification.
# NOTE(review): elided listing -- original line numbers jump, so
# statements such as the header-list initialisation, "try:" lines and
# "else:" branches are not visible here.
55 def table_details(table, input_data):
56     """Generate the table(s) with algorithm: table_detailed_test_results
57     specified in the specification file.
59     :param table: Table to generate.
60     :param input_data: Data to process.
61     :type table: pandas.Series
62     :type input_data: InputData
65     logging.info("  Generating the table {0} ...".
66                  format(table.get("title", "")))
69     logging.info("    Creating the data set for the {0} '{1}'.".
70                  format(table.get("type", ""), table.get("title", "")))
71     data = input_data.filter_data(table)
73     # Prepare the header of the tables
75     for column in table["columns"]:
76         header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
78     # Generate the data for the table according to the model in the table
# Python 2 idiom: dict.keys()[0] -- only the first job in the spec and
# its first build are rendered.
80     job = table["data"].keys()[0]
81     build = str(table["data"][job][0])
83     suites = input_data.suites(job, build)
85         logging.error("  No data available. The table will not be generated.")
88     for suite_longname, suite in suites.iteritems():
90         suite_name = suite["name"]
92         for test in data[job][build].keys():
93             if data[job][build][test]["parent"] in suite_name:
95                 for column in table["columns"]:
# Column "data" is a space-separated selector; the second token names
# the field to pull from the test record. Double quotes are CSV-escaped.
97                         col_data = str(data[job][build][test][column["data"].
98                                        split(" ")[1]]).replace('"', '""')
# Multi-line fields ("vat-history", ...) are re-flowed into an rST
# "|prein| ... |preout|" block; col_data[:-5] presumably trims a
# trailing " |br| "-style fragment -- TODO confirm against renderer.
99                         if column["data"].split(" ")[1] in ("vat-history",
101                             col_data = replace(col_data, " |br| ", "",
103                             col_data = " |prein| {0} |preout| ".\
104                                 format(col_data[:-5])
105                         row_lst.append('"{0}"'.format(col_data))
107                         row_lst.append("No data")
108                 table_lst.append(row_lst)
110         # Write the data to file
# One CSV file is written per suite that produced any rows.
112             file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
113                                             table["output-file-ext"])
114             logging.info("      Writing file: '{}'".format(file_name))
115             with open(file_name, "w") as file_handler:
116                 file_handler.write(",".join(header) + "\n")
117                 for item in table_lst:
118                     file_handler.write(",".join(item) + "\n")
120     logging.info("  Done.")
# Same output shape as table_details, but operating on data merged
# across all jobs/builds instead of a single job/build.
# NOTE(review): elided listing -- original line numbers jump, so some
# statements ("try:", "else:", list initialisations) are not visible.
123 def table_merged_details(table, input_data):
124     """Generate the table(s) with algorithm: table_merged_details
125     specified in the specification file.
127     :param table: Table to generate.
128     :param input_data: Data to process.
129     :type table: pandas.Series
130     :type input_data: InputData
133     logging.info("  Generating the table {0} ...".
134                  format(table.get("title", "")))
137     logging.info("    Creating the data set for the {0} '{1}'.".
138                  format(table.get("type", ""), table.get("title", "")))
# Merge test data (and, below, suite data) from every selected
# job/build into one flat data set before tabulating.
139     data = input_data.filter_data(table)
140     data = input_data.merge_data(data)
141     data.sort_index(inplace=True)
143     logging.info("    Creating the data set for the {0} '{1}'.".
144                  format(table.get("type", ""), table.get("title", "")))
145     suites = input_data.filter_data(table, data_set="suites")
146     suites = input_data.merge_data(suites)
148     # Prepare the header of the tables
150     for column in table["columns"]:
151         header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
153     for _, suite in suites.iteritems():
155         suite_name = suite["name"]
157         for test in data.keys():
158             if data[test]["parent"] in suite_name:
160                 for column in table["columns"]:
# Second token of the column selector names the field; quotes are
# CSV-escaped, multi-line log fields re-flowed as |prein|/|preout|.
162                         col_data = str(data[test][column["data"].
163                                        split(" ")[1]]).replace('"', '""')
164                         if column["data"].split(" ")[1] in ("vat-history",
166                             col_data = replace(col_data, " |br| ", "",
168                             col_data = " |prein| {0} |preout| ".\
169                                 format(col_data[:-5])
170                         row_lst.append('"{0}"'.format(col_data))
172                         row_lst.append("No data")
173                 table_lst.append(row_lst)
175         # Write the data to file
# One CSV file per suite, as in table_details.
177             file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
178                                             table["output-file-ext"])
179             logging.info("      Writing file: '{}'".format(file_name))
180             with open(file_name, "w") as file_handler:
181                 file_handler.write(",".join(header) + "\n")
182                 for item in table_lst:
183                     file_handler.write(",".join(item) + "\n")
185     logging.info("  Done.")
# Build a CSV (plus pretty-printed txt) comparing "reference" vs
# "compare" throughput / receive rate per test, with optional extra
# "history" columns, and a trailing relative-change column.
# NOTE(review): elided listing -- original line numbers jump; missing
# from view are e.g. the tbl_dict initialisation, several "try:" /
# "else:" lines and dict-literal continuations.
188 def table_performance_comparison(table, input_data):
189     """Generate the table(s) with algorithm: table_performance_comparison
190     specified in the specification file.
192     :param table: Table to generate.
193     :param input_data: Data to process.
194     :type table: pandas.Series
195     :type input_data: InputData
198     logging.info("  Generating the table {0} ...".
199                  format(table.get("title", "")))
202     logging.info("    Creating the data set for the {0} '{1}'.".
203                  format(table.get("type", ""), table.get("title", "")))
204     data = input_data.filter_data(table, continue_on_error=True)
206     # Prepare the header of the tables
208         header = ["Test case", ]
210         if table["include-tests"] == "MRR":
211             hdr_param = "Receive Rate"
213             hdr_param = "Throughput"
215         history = table.get("history", None)
219                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
220                      "{0} Stdev [Mpps]".format(item["title"])])
222             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
223              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
224              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
225              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
227         header_str = ",".join(header) + "\n"
228     except (AttributeError, KeyError) as err:
229         logging.error("The model is invalid, missing parameter: {0}".
233     # Prepare data to the table:
235     for job, builds in table["reference"]["data"].items():
237             for tst_name, tst_data in data[job][str(build)].iteritems():
# Normalise the test name: drop rate-search suffixes (-ndrpdr etc.)
# and collapse thread/core tags ("4t4c" -> "4c") so reference and
# compare rows from different test variants line up on one key.
238                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
239                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
240                     replace("-ndrdisc", "").replace("-pdr", "").\
241                     replace("-ndr", "").\
242                     replace("1t1c", "1c").replace("2t1c", "1c").\
243                     replace("2t2c", "2c").replace("4t2c", "2c").\
244                     replace("4t4c", "4c").replace("8t4c", "4c")
245                 if "across topologies" in table["title"].lower():
246                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
247                 if tbl_dict.get(tst_name_mod, None) is None:
248                     name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
249                                             "-".join(tst_data["name"].
251                     if "across testbeds" in table["title"].lower() or \
252                             "across topologies" in table["title"].lower():
254                             replace("1t1c", "1c").replace("2t1c", "1c").\
255                             replace("2t2c", "2c").replace("4t2c", "2c").\
256                             replace("4t4c", "4c").replace("8t4c", "4c")
257                     tbl_dict[tst_name_mod] = {"name": name,
261                 # TODO: Re-work when NDRPDRDISC tests are not used
# Reference sample: which field is read depends on the spec's
# "include-tests" (MRR receive rate, or PDR/NDR throughput; NDRPDR
# results supply the LOWER bound).
262                     if table["include-tests"] == "MRR":
263                         tbl_dict[tst_name_mod]["ref-data"]. \
264                             append(tst_data["result"]["receive-rate"].avg)
265                     elif table["include-tests"] == "PDR":
266                         if tst_data["type"] == "PDR":
267                             tbl_dict[tst_name_mod]["ref-data"]. \
268                                 append(tst_data["throughput"]["value"])
269                         elif tst_data["type"] == "NDRPDR":
270                             tbl_dict[tst_name_mod]["ref-data"].append(
271                                 tst_data["throughput"]["PDR"]["LOWER"])
272                     elif table["include-tests"] == "NDR":
273                         if tst_data["type"] == "NDR":
274                             tbl_dict[tst_name_mod]["ref-data"]. \
275                                 append(tst_data["throughput"]["value"])
276                         elif tst_data["type"] == "NDRPDR":
277                             tbl_dict[tst_name_mod]["ref-data"].append(
278                                 tst_data["throughput"]["NDR"]["LOWER"])
282                     pass  # No data in output.xml for this test
# Compare sample: same field selection as the reference pass above.
284     for job, builds in table["compare"]["data"].items():
286             for tst_name, tst_data in data[job][str(build)].iteritems():
287                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
288                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
289                     replace("-ndrdisc", "").replace("-pdr", ""). \
290                     replace("-ndr", "").\
291                     replace("1t1c", "1c").replace("2t1c", "1c").\
292                     replace("2t2c", "2c").replace("4t2c", "2c").\
293                     replace("4t4c", "4c").replace("8t4c", "4c")
294                 if "across topologies" in table["title"].lower():
295                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
297                     # TODO: Re-work when NDRPDRDISC tests are not used
298                     if table["include-tests"] == "MRR":
299                         tbl_dict[tst_name_mod]["cmp-data"]. \
300                             append(tst_data["result"]["receive-rate"].avg)
301                     elif table["include-tests"] == "PDR":
302                         if tst_data["type"] == "PDR":
303                             tbl_dict[tst_name_mod]["cmp-data"]. \
304                                 append(tst_data["throughput"]["value"])
305                         elif tst_data["type"] == "NDRPDR":
306                             tbl_dict[tst_name_mod]["cmp-data"].append(
307                                 tst_data["throughput"]["PDR"]["LOWER"])
308                     elif table["include-tests"] == "NDR":
309                         if tst_data["type"] == "NDR":
310                             tbl_dict[tst_name_mod]["cmp-data"]. \
311                                 append(tst_data["throughput"]["value"])
312                         elif tst_data["type"] == "NDRPDR":
313                             tbl_dict[tst_name_mod]["cmp-data"].append(
314                                 tst_data["throughput"]["NDR"]["LOWER"])
320                     tbl_dict.pop(tst_name_mod, None)
# Optional "history" columns: one (mean, stdev) pair per configured
# historical data source, keyed by its title.
323         for job, builds in item["data"].items():
325                 for tst_name, tst_data in data[job][str(build)].iteritems():
326                     tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
327                         replace("-ndrpdr", "").replace("-pdrdisc", ""). \
328                         replace("-ndrdisc", "").replace("-pdr", ""). \
329                         replace("-ndr", "").\
330                         replace("1t1c", "1c").replace("2t1c", "1c").\
331                         replace("2t2c", "2c").replace("4t2c", "2c").\
332                         replace("4t4c", "4c").replace("8t4c", "4c")
333                     if "across topologies" in table["title"].lower():
334                         tst_name_mod = tst_name_mod.replace("2n1l-", "")
335                     if tbl_dict.get(tst_name_mod, None) is None:
337                     if tbl_dict[tst_name_mod].get("history", None) is None:
338                         tbl_dict[tst_name_mod]["history"] = OrderedDict()
339                     if tbl_dict[tst_name_mod]["history"].get(item["title"],
341                         tbl_dict[tst_name_mod]["history"][item["title"]] = \
344                         # TODO: Re-work when NDRPDRDISC tests are not used
345                         if table["include-tests"] == "MRR":
346                             tbl_dict[tst_name_mod]["history"][item["title"
347                                 ]].append(tst_data["result"]["receive-rate"].
349                         elif table["include-tests"] == "PDR":
350                             if tst_data["type"] == "PDR":
351                                 tbl_dict[tst_name_mod]["history"][
353                                     append(tst_data["throughput"]["value"])
354                             elif tst_data["type"] == "NDRPDR":
355                                 tbl_dict[tst_name_mod]["history"][item[
356                                     "title"]].append(tst_data["throughput"][
358                         elif table["include-tests"] == "NDR":
359                             if tst_data["type"] == "NDR":
360                                 tbl_dict[tst_name_mod]["history"][
362                                     append(tst_data["throughput"]["value"])
363                             elif tst_data["type"] == "NDRPDR":
364                                 tbl_dict[tst_name_mod]["history"][item[
365                                     "title"]].append(tst_data["throughput"][
369                     except (TypeError, KeyError):
# Assemble rows: mean and stdev are scaled to Mpps (/ 1e6, rounded to
# 2 places); missing series contribute (None, None) placeholders so
# the column count stays aligned with the header.
373     for tst_name in tbl_dict.keys():
374         item = [tbl_dict[tst_name]["name"], ]
376         if tbl_dict[tst_name].get("history", None) is not None:
377             for hist_data in tbl_dict[tst_name]["history"].values():
379                     item.append(round(mean(hist_data) / 1000000, 2))
380                     item.append(round(stdev(hist_data) / 1000000, 2))
382                     item.extend([None, None])
384             item.extend([None, None])
385         data_t = tbl_dict[tst_name]["ref-data"]
387             item.append(round(mean(data_t) / 1000000, 2))
388             item.append(round(stdev(data_t) / 1000000, 2))
390             item.extend([None, None])
391         data_t = tbl_dict[tst_name]["cmp-data"]
393             item.append(round(mean(data_t) / 1000000, 2))
394             item.append(round(stdev(data_t) / 1000000, 2))
396             item.extend([None, None])
# Relative change is computed only when both the reference mean
# (item[-4]) and compare mean (item[-2]) exist and ref is non-zero.
397         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
398             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
399         if len(item) == len(header):
402     # Sort the table according to the relative change
403     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
405     # Generate csv tables:
406     csv_file = "{0}.csv".format(table["output-file"])
407     with open(csv_file, "w") as file_handler:
408         file_handler.write(header_str)
410             file_handler.write(",".join([str(item) for item in test]) + "\n")
412     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
# Build the MRR trending dashboard: per test, classify the receive-rate
# series for anomalies, compute short- and long-term relative change,
# count regressions/progressions in the short window, and write a CSV
# plus pretty txt, ordered worst-first.
# NOTE(review): elided listing -- original line numbers jump; several
# assignments (header items, last_avg, max_long_avg, tbl_sorted init,
# "continue"/"else:" lines) are not visible here.
415 def table_performance_trending_dashboard(table, input_data):
416     """Generate the table(s) with algorithm:
417     table_performance_trending_dashboard
418     specified in the specification file.
420     :param table: Table to generate.
421     :param input_data: Data to process.
422     :type table: pandas.Series
423     :type input_data: InputData
426     logging.info("  Generating the table {0} ...".
427                  format(table.get("title", "")))
430     logging.info("    Creating the data set for the {0} '{1}'.".
431                  format(table.get("type", ""), table.get("title", "")))
432     data = input_data.filter_data(table, continue_on_error=True)
434     # Prepare the header of the tables
435     header = ["Test Case",
437               "Short-Term Change [%]",
438               "Long-Term Change [%]",
442     header_str = ",".join(header) + "\n"
444     # Prepare data to the table:
446     for job, builds in table["data"].items():
448             for tst_name, tst_data in data[job][str(build)].iteritems():
449                 if tst_name.lower() in table["ignore-list"]:
451                 if tbl_dict.get(tst_name, None) is None:
# Row name is "<nic>-<test name>"; the NIC token is pulled from the
# parent suite name via REGEX_NIC.
452                     groups = re.search(REGEX_NIC, tst_data["parent"])
455                     nic = groups.group(0)
456                     tbl_dict[tst_name] = {
457                         "name": "{0}-{1}".format(nic, tst_data["name"]),
458                         "data": OrderedDict()}
460                     tbl_dict[tst_name]["data"][str(build)] = \
461                         tst_data["result"]["receive-rate"]
462                 except (TypeError, KeyError):
463                     pass  # No data in output.xml for this test
466     for tst_name in tbl_dict.keys():
467         data_t = tbl_dict[tst_name]["data"]
# classify_anomalies (utils) tags each sample -- the tags counted
# below include "regression" and "progression" -- and returns the
# per-sample trend averages used for the change calculations.
471         classification_lst, avgs = classify_anomalies(data_t)
473         win_size = min(len(data_t), table["window"])
474         long_win_size = min(len(data_t), table["long-trend-window"])
478                 [x for x in avgs[-long_win_size:-win_size]
483         avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Guard both percent-change computations against NaN samples and a
# zero denominator; NaN marks "not computable" for the cell.
485         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
486             rel_change_last = nan
488             rel_change_last = round(
489                 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
491         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
492             rel_change_long = nan
494             rel_change_long = round(
495                 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
497         if classification_lst:
498             if isnan(rel_change_last) and isnan(rel_change_long):
501                 [tbl_dict[tst_name]["name"],
502                  '-' if isnan(last_avg) else
503                  round(last_avg / 1000000, 2),
504                  '-' if isnan(rel_change_last) else rel_change_last,
505                  '-' if isnan(rel_change_long) else rel_change_long,
506                  classification_lst[-win_size:].count("regression"),
507                  classification_lst[-win_size:].count("progression")])
509     tbl_lst.sort(key=lambda rel: rel[0])
# Final ordering: most regressions (col 4) first, then most
# progressions (col 5), rows within each bucket by short-term change.
512     for nrr in range(table["window"], -1, -1):
513         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
514         for nrp in range(table["window"], -1, -1):
515             tbl_out = [item for item in tbl_reg if item[5] == nrp]
516             tbl_out.sort(key=lambda rel: rel[2])
517             tbl_sorted.extend(tbl_out)
519     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
521     logging.info("    Writing file: '{0}'".format(file_name))
522     with open(file_name, "w") as file_handler:
523         file_handler.write(header_str)
524         for test in tbl_sorted:
525             file_handler.write(",".join([str(item) for item in test]) + '\n')
527     txt_file_name = "{0}.txt".format(table["output-file"])
528     logging.info("    Writing file: '{0}'".format(txt_file_name))
529     convert_csv_to_pretty_txt(file_name, txt_file_name)
# Map a test-case name to the trending-page file name, feature suffix,
# NIC, frame size, core count and anchor that together form the plot
# URL returned at the end.
# NOTE(review): elided listing -- many branches' assignments (url, nic,
# framesize, anchor, cores and several file_name/feature values) are
# on lines missing from this view; the visible branches only show the
# classification structure.
532 def _generate_url(base, testbed, test_name):
533     """Generate URL to a trending plot from the name of the test case.
535     :param base: The base part of URL common to all test cases.
536     :param testbed: The testbed used for testing.
537     :param test_name: The name of the test case.
541     :returns: The URL to the plot with the trending data for the given test
# First tier: pick the trending page (file_name) and feature suffix
# from substrings of the test name.
551     if "lbdpdk" in test_name or "lbvpp" in test_name:
552         file_name = "link_bonding"
554     elif "114b" in test_name and "vhost" in test_name:
557     elif "testpmd" in test_name or "l3fwd" in test_name:
560     elif "memif" in test_name:
561         file_name = "container_memif"
564     elif "srv6" in test_name:
567     elif "vhost" in test_name:
568         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
569             file_name = "vm_vhost_l2"
570             if "114b" in test_name:
572             elif "l2xcbase" in test_name:
573                 feature = "-base-l2xc"
574             elif "l2bdbasemaclrn" in test_name:
575                 feature = "-base-l2bd"
578         elif "ip4base" in test_name:
579             file_name = "vm_vhost_ip4"
582     elif "ipsec" in test_name:
584         feature = "-base-scale"
586     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
587         file_name = "ip4_tunnels"
590     elif "ip4base" in test_name or "ip4scale" in test_name:
592         if "xl710" in test_name:
593             feature = "-base-scale-features"
594         elif "iacl" in test_name:
595             feature = "-features-iacl"
596         elif "oacl" in test_name:
597             feature = "-features-oacl"
598         elif "snat" in test_name or "cop" in test_name:
599             feature = "-features"
601             feature = "-base-scale"
603     elif "ip6base" in test_name or "ip6scale" in test_name:
605         feature = "-base-scale"
607     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
608             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
609             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
611         if "macip" in test_name:
612             feature = "-features-macip"
613         elif "iacl" in test_name:
614             feature = "-features-iacl"
615         elif "oacl" in test_name:
616             feature = "-features-oacl"
618             feature = "-base-scale"
# Second tier: NIC model (assignments elided from this view).
620     if "x520" in test_name:
622     elif "x710" in test_name:
624     elif "xl710" in test_name:
626     elif "xxv710" in test_name:
# Third tier: frame size token, appended to the page anchor.
632     if "64b" in test_name:
634     elif "78b" in test_name:
636     elif "imix" in test_name:
638     elif "9000b" in test_name:
640     elif "1518b" in test_name:
642     elif "114b" in test_name:
646     anchor += framesize + '-'
# Fourth tier: core-count token (assignments elided from this view).
648     if "1t1c" in test_name:
650     elif "2t2c" in test_name:
652     elif "4t4c" in test_name:
654     elif "2t1c" in test_name:
656     elif "4t2c" in test_name:
658     elif "8t4c" in test_name:
661     return url + file_name + '-' + testbed + '-' + nic + framesize + feature + \
# Render the dashboard CSV (produced by
# table_performance_trending_dashboard) as an HTML table embedded in an
# rST ".. raw:: html" block, with regression/progression row colouring
# and per-test links to the trending plots.
# NOTE(review): elided listing -- original line numbers jump; e.g. the
# "try:"/"except KeyError" around the CSV read, th/td text assignments
# and the regression colour selection are not visible here.
665 def table_performance_trending_dashboard_html(table, input_data):
666     """Generate the table(s) with algorithm:
667     table_performance_trending_dashboard_html specified in the specification
670     :param table: Table to generate.
671     :param input_data: Data to process.
673     :type input_data: InputData
676     testbed = table.get("testbed", None)
678         logging.error("The testbed is not defined for the table '{0}'.".
679                       format(table.get("title", "")))
682     logging.info("  Generating the table {0} ...".
683                  format(table.get("title", "")))
# Python 2 style: csv.reader over a file opened in binary mode.
686         with open(table["input-file"], 'rb') as csv_file:
687             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
688             csv_lst = [item for item in csv_content]
690         logging.warning("The input file is not defined.")
692     except csv.Error as err:
693         logging.warning("Not possible to process the file '{0}'.\n{1}".
694                         format(table["input-file"], err))
698     dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row: first CSV row becomes <th> cells, first column
# left-aligned, the rest centered.
701     tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
702     for idx, item in enumerate(csv_lst[0]):
703         alignment = "left" if idx == 0 else "center"
704         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Two shades per classification, alternated by row index for striping.
708     colors = {"regression": ("#ffcccc", "#ff9999"),
709               "progression": ("#c6ecc6", "#9fdf9f"),
710               "normal": ("#e9f1fb", "#d4e4f7")}
711     for r_idx, row in enumerate(csv_lst[1:]):
715             color = "progression"
718         background = colors[color][r_idx % 2]
719         tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
722         for c_idx, item in enumerate(row):
723             alignment = "left" if c_idx == 0 else "center"
724             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# The test-name cell links to its trending plot page.
727                 url = _generate_url("../trending/", testbed, item)
728                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
733         with open(table["output-file"], 'w') as html_file:
734             logging.info("    Writing file: '{0}'".format(table["output-file"]))
735             html_file.write(".. raw:: html\n\n\t")
736             html_file.write(ET.tostring(dashboard))
737             html_file.write("\n\t<p><br><br></p>\n")
739         logging.warning("The output file is not defined.")
# Build a CSV (plus pretty txt) of tests that failed within the
# configured time window, with the most recent failure's timestamp and
# VPP/CSIT build ids.
# NOTE(review): elided listing -- original line numbers jump; e.g. the
# tbl_dict/tbl_lst/"now" initialisations, "try:" lines and parts of the
# per-build tuple are not visible here.
743 def table_failed_tests(table, input_data):
744     """Generate the table(s) with algorithm: table_failed_tests
745     specified in the specification file.
747     :param table: Table to generate.
748     :param input_data: Data to process.
749     :type table: pandas.Series
750     :type input_data: InputData
753     logging.info("  Generating the table {0} ...".
754                  format(table.get("title", "")))
757     logging.info("    Creating the data set for the {0} '{1}'.".
758                  format(table.get("type", ""), table.get("title", "")))
759     data = input_data.filter_data(table, continue_on_error=True)
761     # Prepare the header of the tables
762     header = ["Test Case",
764               "Last Failure [Time]",
765               "Last Failure [VPP-Build-Id]",
766               "Last Failure [CSIT-Job-Build-Id]"]
768     # Generate the data for the table according to the model in the table
# Only builds generated within the last "window" days (default 7)
# are taken into account.
772     timeperiod = timedelta(int(table.get("window", 7)))
775     for job, builds in table["data"].items():
778             for tst_name, tst_data in data[job][build].iteritems():
779                 if tst_name.lower() in table["ignore-list"]:
781                 if tbl_dict.get(tst_name, None) is None:
782                     groups = re.search(REGEX_NIC, tst_data["parent"])
785                     nic = groups.group(0)
786                     tbl_dict[tst_name] = {
787                         "name": "{0}-{1}".format(nic, tst_data["name"]),
788                         "data": OrderedDict()}
# Build generation timestamp comes from the job/build metadata and is
# parsed with the fixed "%Y%m%d %H:%M" format.
790                     generated = input_data.metadata(job, build).\
794                     then = dt.strptime(generated, "%Y%m%d %H:%M")
795                     if (now - then) <= timeperiod:
796                         tbl_dict[tst_name]["data"][build] = (
799                             input_data.metadata(job, build).get("version", ""),
801                 except (TypeError, KeyError):
802                     pass  # No data in output.xml for this test
# Reduce per-build tuples to a single row per test; val[1..3] carry
# the last failure's date, VPP version and CSIT build id.
805     for tst_data in tbl_dict.values():
807         for val in tst_data["data"].values():
810                 fails_last_date = val[1]
811                 fails_last_vpp = val[2]
812                 fails_last_csit = val[3]
814             tbl_lst.append([tst_data["name"],
818                             "mrr-daily-build-{0}".format(fails_last_csit)])
820     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
# Group the rows by failure count (col 1), highest count first.
822     for nrf in range(table["window"], -1, -1):
823         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
824         tbl_sorted.extend(tbl_fails)
825     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
827     logging.info("    Writing file: '{0}'".format(file_name))
828     with open(file_name, "w") as file_handler:
829         file_handler.write(",".join(header) + "\n")
830         for test in tbl_sorted:
831             file_handler.write(",".join([str(item) for item in test]) + '\n')
833     txt_file_name = "{0}.txt".format(table["output-file"])
834     logging.info("    Writing file: '{0}'".format(txt_file_name))
835     convert_csv_to_pretty_txt(file_name, txt_file_name)
# Render the failed-tests CSV (produced by table_failed_tests) as an
# HTML table in an rST ".. raw:: html" block, striped rows, each test
# name linking to its trending plot. Structure parallels
# table_performance_trending_dashboard_html but without the
# regression/progression colouring.
# NOTE(review): elided listing -- original line numbers jump; e.g. the
# "try:"/"except KeyError" around the CSV read and the th/td text
# assignments are not visible here.
838 def table_failed_tests_html(table, input_data):
839     """Generate the table(s) with algorithm: table_failed_tests_html
840     specified in the specification file.
842     :param table: Table to generate.
843     :param input_data: Data to process.
844     :type table: pandas.Series
845     :type input_data: InputData
848     testbed = table.get("testbed", None)
850         logging.error("The testbed is not defined for the table '{0}'.".
851                       format(table.get("title", "")))
854     logging.info("  Generating the table {0} ...".
855                  format(table.get("title", "")))
# Python 2 style: csv.reader over a file opened in binary mode.
858         with open(table["input-file"], 'rb') as csv_file:
859             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
860             csv_lst = [item for item in csv_content]
862         logging.warning("The input file is not defined.")
864     except csv.Error as err:
865         logging.warning("Not possible to process the file '{0}'.\n{1}".
866                         format(table["input-file"], err))
870     failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row from the first CSV row; first column left-aligned.
873     tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
874     for idx, item in enumerate(csv_lst[0]):
875         alignment = "left" if idx == 0 else "center"
876         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Simple two-colour row striping.
880     colors = ("#e9f1fb", "#d4e4f7")
881     for r_idx, row in enumerate(csv_lst[1:]):
882         background = colors[r_idx % 2]
883         tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
886         for c_idx, item in enumerate(row):
887             alignment = "left" if c_idx == 0 else "center"
888             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# The test-name cell links to its trending plot page.
891                 url = _generate_url("../trending/", testbed, item)
892                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
897         with open(table["output-file"], 'w') as html_file:
898             logging.info("    Writing file: '{0}'".format(table["output-file"]))
899             html_file.write(".. raw:: html\n\n\t")
900             html_file.write(ET.tostring(failed_tests))
901             html_file.write("\n\t<p><br><br></p>\n")
903         logging.warning("The output file is not defined.")