1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
27 from utils import mean, stdev, relative_change, classify_anomalies, \
28 convert_csv_to_pretty_txt
# Matches the NIC part of a test parent name, e.g. "10ge2p1x520" / "40ge2p1xl710".
REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        try:
            # NOTE(review): eval() executes the algorithm name taken from the
            # specification file; spec files are trusted (operator-provided)
            # input, but a dispatch dict would be safer.
            eval(table["algorithm"])(table, data)
        except NameError as err:
            # A typo in the spec must not abort generation of the other tables.
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
    logging.info("Done.")
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables; titles are CSV-escaped by doubling
    # embedded double quotes.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification. Only the first job/build pair from the spec is used.
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.iteritems():
        # Generate data: one output table per suite.
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # column["data"] is e.g. "data name"; the second token
                        # is the key into the test data.
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # Strip the first rST line break and re-wrap the
                            # text in preformatted-block markers.
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file; skip suites with no matching tests.
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Same output format as table_details, but the data from all builds is
    merged into one data set first.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; titles are CSV-escaped.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.iteritems():
        # Generate data: one output table per suite.
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # Second token of column["data"] is the test-data key.
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # Strip first rST line break; wrap as preformatted.
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file; skip suites with no matching tests.
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Compares a "reference" data set against a "compare" data set (plus
    optional "history" columns) and reports mean/stdev throughput per test
    together with the relative change.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Normalize the test name: drop the test-type suffix and
                # collapse thread/core combinations to the core count so the
                # same test matches across data sets.
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[:-1]))
                    if "comparison across testbeds" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["ref-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except TypeError:
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                    replace("-ndrdisc", "").replace("-pdr", ""). \
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["cmp-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                    else:
                        continue
                except KeyError:
                    pass  # Test not present in the reference set.
                except TypeError:
                    # Broken compare data: drop the test entirely.
                    tbl_dict.pop(tst_name_mod, None)

    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                            replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                            replace("-ndrdisc", "").replace("-pdr", ""). \
                            replace("-ndr", "").\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].get(item["title"],
                                                                 None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][item["title"
                                ]].append(tst_data["result"]["receive-rate"].
                                          avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "PDR"]["LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][item[
                                        "title"]].append(tst_data["throughput"][
                                        "NDR"]["LOWER"])
                            else:
                                continue
                        except (TypeError, KeyError):
                            pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # item[-4] is the reference mean, item[-2] the compare mean.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only complete rows (all columns present, incl. delta).
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Prefix the displayed name with the NIC extracted from
                    # the parent suite name.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # Empty window (all NaN or too few samples).
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 '-' if isnan(last_avg) else
                 round(last_avg / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order rows: most regressions first, then most progressions.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
523 def _generate_url(base, testbed, test_name):
524 """Generate URL to a trending plot from the name of the test case.
526 :param base: The base part of URL common to all test cases.
527 :param testbed: The testbed used for testing.
528 :param test_name: The name of the test case.
532 :returns: The URL to the plot with the trending data for the given test
542 if "lbdpdk" in test_name or "lbvpp" in test_name:
543 file_name = "link_bonding"
545 elif "testpmd" in test_name or "l3fwd" in test_name:
548 elif "memif" in test_name:
549 file_name = "container_memif"
552 elif "srv6" in test_name:
555 elif "vhost" in test_name:
556 if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
557 file_name = "vm_vhost_l2"
558 if "l2xcbase" in test_name:
559 feature = "-base-l2xc"
560 elif "l2bdbasemaclrn" in test_name:
561 feature = "-base-l2bd"
564 elif "ip4base" in test_name:
565 file_name = "vm_vhost_ip4"
568 elif "ipsec" in test_name:
570 feature = "-base-scale"
572 elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
573 file_name = "ip4_tunnels"
576 elif "ip4base" in test_name or "ip4scale" in test_name:
578 if "xl710" in test_name:
579 feature = "-base-scale-features"
580 elif "iacl" in test_name:
581 feature = "-features-iacl"
582 elif "oacl" in test_name:
583 feature = "-features-oacl"
584 elif "snat" in test_name or "cop" in test_name:
585 feature = "-features"
587 feature = "-base-scale"
589 elif "ip6base" in test_name or "ip6scale" in test_name:
591 feature = "-base-scale"
593 elif "l2xcbase" in test_name or "l2xcscale" in test_name \
594 or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
595 or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
597 if "macip" in test_name:
598 feature = "-features-macip"
599 elif "iacl" in test_name:
600 feature = "-features-iacl"
601 elif "oacl" in test_name:
602 feature = "-features-oacl"
604 feature = "-base-scale"
606 if "x520" in test_name:
608 elif "x710" in test_name:
610 elif "xl710" in test_name:
612 elif "xxv710" in test_name:
618 if "64b" in test_name:
620 elif "78b" in test_name:
622 elif "imix" in test_name:
624 elif "9000b" in test_name:
626 elif "1518b" in test_name:
628 elif "114b" in test_name:
632 anchor += framesize + '-'
634 if "1t1c" in test_name:
636 elif "2t2c" in test_name:
638 elif "4t4c" in test_name:
640 elif "2t1c" in test_name:
642 elif "4t2c" in test_name:
644 elif "8t4c" in test_name:
647 return url + file_name + '-' + testbed + '-' + nic + framesize + feature + \
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the CSV dashboard produced earlier and renders it as an HTML table
    embedded in an rST "raw" directive, with trending-plot links on the test
    names.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows: alternate shades within each classification color.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 hold the regression / progression counts.
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        else:
            color = "normal"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name: first column links to the trending plot.
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("  Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Prefix the displayed name with the NIC from the parent
                    # suite name.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][build] = (
                        tst_data["status"],
                        input_data.metadata(job, build).get("generated", ""),
                        input_data.metadata(job, build).get("version", ""),
                        build)
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_data in tbl_dict.values():
        # Count failures within the sliding window; remember the last one.
        win_size = min(len(tst_data["data"]), table["window"])
        fails_nr = 0
        for val in tst_data["data"].values()[-win_size:]:
            if val[0] == "FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    # Most-failing tests first.
    for nrf in range(table["window"], -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the failed-tests CSV produced by table_failed_tests and renders it
    as an HTML table embedded in an rST "raw" directive, with trending-plot
    links on the test names.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows: alternate two background shades.
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name: first column links to the trending plot.
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("  Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return