1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
27 from utils import mean, stdev, relative_change, classify_anomalies, \
28 convert_csv_to_pretty_txt
# Matches NIC identifiers such as "10ge2p1x520" embedded in a test's parent
# suite name; the dashboard/failed-tests tables use the match to prefix the
# displayed test name with the NIC type.
REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table item names its generator function in table["algorithm"];
    the function is looked up by name and invoked with (table, data).

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        try:
            # The algorithm name comes from the trusted specification file,
            # not from untrusted input, so eval-based dispatch is acceptable
            # here; a missing generator is reported instead of aborting.
            eval(table["algorithm"])(table, data)
        except NameError as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
    logging.info("Done.")
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    Writes one CSV file per suite, listing the configured columns for every
    test whose parent matches the suite name.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables: column titles, CSV-quoted.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification. Only the first job/build pair is used.
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("  No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.iteritems():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # column["data"] is e.g. "data name"; the second
                        # token selects the field in the test record.
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # Strip the first " |br| " and wrap the rest in
                            # rst pre-formatting markers.
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Same output shape as table_details, but the per-build data is first
    merged across jobs/builds, so the test keys are flat (no job/build
    nesting).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables: column titles, CSV-quoted.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.iteritems():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # Second token of column["data"] selects the field.
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Builds a CSV comparing a "reference" data set against a "compare" data
    set (optionally preceded by historical columns), with mean, stdev and
    relative change per test, sorted by relative change.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    def _mod_test_name(tst_name):
        """Normalize a test name: drop the test-type suffix and collapse
        thread/core tags (e.g. "2t1c" -> "1c") so reference and compare
        rows line up."""
        return tst_name.replace("-ndrpdrdisc", "").\
            replace("-ndrpdr", "").replace("-pdrdisc", "").\
            replace("-ndrdisc", "").replace("-pdr", "").\
            replace("-ndr", "").\
            replace("1t1c", "1c").replace("2t1c", "1c").\
            replace("2t2c", "2c").replace("4t2c", "2c").\
            replace("4t4c", "4c").replace("8t4c", "4c")

    def _result_value(tst_data, include_tests):
        """Extract the measured value for the configured test kind, or
        raise KeyError when this record does not carry it."""
        # TODO: Re-work when NDRPDRDISC tests are not used
        if include_tests == "MRR":
            return tst_data["result"]["receive-rate"].avg
        elif include_tests == "PDR":
            if tst_data["type"] == "PDR":
                return tst_data["throughput"]["value"]
            elif tst_data["type"] == "NDRPDR":
                return tst_data["throughput"]["PDR"]["LOWER"]
        elif include_tests == "NDR":
            if tst_data["type"] == "NDR":
                return tst_data["throughput"]["value"]
            elif tst_data["type"] == "NDRPDR":
                return tst_data["throughput"]["NDR"]["LOWER"]
        raise KeyError("No usable result in the test data.")

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = _mod_test_name(tst_name)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    tbl_dict[tst_name_mod]["ref-data"].append(
                        _result_value(tst_data, table["include-tests"]))
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = _mod_test_name(tst_name)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                try:
                    tbl_dict[tst_name_mod]["cmp-data"].append(
                        _result_value(tst_data, table["include-tests"]))
                except (TypeError, KeyError):
                    # A test present only in the compare set (or without
                    # usable data) cannot be compared - drop it entirely.
                    tbl_dict.pop(tst_name_mod, None)

    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in \
                            data[job][str(build)].iteritems():
                        tst_name_mod = _mod_test_name(tst_name)
                        if "across topologies" in table["title"].lower():
                            tst_name_mod = tst_name_mod.replace("2n1l-", "")
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].\
                                get(item["title"], None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] =\
                                list()
                        try:
                            tbl_dict[tst_name_mod]["history"][item["title"]].\
                                append(_result_value(tst_data,
                                                     table["include-tests"]))
                        except (TypeError, KeyError):
                            pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # item[-4] is the reference mean, item[-2] the compare mean.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Rows without a computed delta do not match the header and are
        # silently dropped.
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    Classifies MRR trend samples per test (via classify_anomalies), computes
    short- and long-term relative changes and writes a CSV plus a pretty-text
    rendering of it.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        if len(data_t) < 2:
            # A trend needs at least two samples.
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 '-' if isnan(last_avg) else
                 round(last_avg / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Bucket-sort: most regressions first, then most progressions, then by
    # short-term change within each bucket.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
530 def _generate_url(base, testbed, test_name):
531 """Generate URL to a trending plot from the name of the test case.
533 :param base: The base part of URL common to all test cases.
534 :param testbed: The testbed used for testing.
535 :param test_name: The name of the test case.
539 :returns: The URL to the plot with the trending data for the given test
549 if "lbdpdk" in test_name or "lbvpp" in test_name:
550 file_name = "link_bonding"
552 elif "114b" in test_name and "vhost" in test_name:
555 elif "testpmd" in test_name or "l3fwd" in test_name:
558 elif "memif" in test_name:
559 file_name = "container_memif"
562 elif "srv6" in test_name:
565 elif "vhost" in test_name:
566 if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
567 file_name = "vm_vhost_l2"
568 if "114b" in test_name:
570 elif "l2xcbase" in test_name:
571 feature = "-base-l2xc"
572 elif "l2bdbasemaclrn" in test_name:
573 feature = "-base-l2bd"
576 elif "ip4base" in test_name:
577 file_name = "vm_vhost_ip4"
580 elif "ipsec" in test_name:
582 feature = "-base-scale"
584 elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
585 file_name = "ip4_tunnels"
588 elif "ip4base" in test_name or "ip4scale" in test_name:
590 if "xl710" in test_name:
591 feature = "-base-scale-features"
592 elif "iacl" in test_name:
593 feature = "-features-iacl"
594 elif "oacl" in test_name:
595 feature = "-features-oacl"
596 elif "snat" in test_name or "cop" in test_name:
597 feature = "-features"
599 feature = "-base-scale"
601 elif "ip6base" in test_name or "ip6scale" in test_name:
603 feature = "-base-scale"
605 elif "l2xcbase" in test_name or "l2xcscale" in test_name \
606 or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
607 or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
609 if "macip" in test_name:
610 feature = "-features-macip"
611 elif "iacl" in test_name:
612 feature = "-features-iacl"
613 elif "oacl" in test_name:
614 feature = "-features-oacl"
616 feature = "-base-scale"
618 if "x520" in test_name:
620 elif "x710" in test_name:
622 elif "xl710" in test_name:
624 elif "xxv710" in test_name:
630 if "64b" in test_name:
632 elif "78b" in test_name:
634 elif "imix" in test_name:
636 elif "9000b" in test_name:
638 elif "1518b" in test_name:
640 elif "114b" in test_name:
644 anchor += framesize + '-'
646 if "1t1c" in test_name:
648 elif "2t2c" in test_name:
650 elif "4t4c" in test_name:
652 elif "2t1c" in test_name:
654 elif "4t2c" in test_name:
656 elif "8t4c" in test_name:
659 return url + file_name + '-' + testbed + '-' + nic + framesize + feature + \
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the CSV produced by table_performance_trending_dashboard and
    renders it as an HTML table (rst "raw" directive), coloring rows by
    regression/progression counts and linking each test name to its
    trending plot.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows: two alternating shades per classification.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 hold regression / progression counts.
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        else:
            color = "normal"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name: first column is the test name, linked to its plot.
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".
                         format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Counts FAIL statuses per test over the configured window of builds and
    writes a CSV (and pretty-text copy) sorted by number of failures.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][build] = (
                        tst_data["status"],
                        input_data.metadata(job, build).get("generated", ""),
                        input_data.metadata(job, build).get("version", ""),
                        build)
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_data in tbl_dict.values():
        win_size = min(len(tst_data["data"]), table["window"])
        fails_nr = 0
        for val in tst_data["data"].values()[-win_size:]:
            if val[0] == "FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    # Bucket by failure count, most failures first.
    tbl_sorted = list()
    for nrf in range(table["window"], -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV produced by table_failed_tests and renders it as an HTML
    table (rst "raw" directive) with alternating row shades and the test
    name linked to its trending plot.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows: alternating shades only (no per-classification coloring here).
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name: first column is the test name, linked to its plot.
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item

    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("    Writing file: '{0}'".
                         format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return