1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
27 from utils import mean, stdev, relative_change, classify_anomalies, \
28 convert_csv_to_pretty_txt
31 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
# Dispatcher: for each table entry in the specification, resolves the
# generator function named by table["algorithm"] and calls it with
# (table, data).
# NOTE(review): this listing is a line-numbered dump with gaps — the
# `try:` that must open the `except NameError` below is missing, and the
# numeric prefixes make the text non-runnable as-is.
# NOTE(review): `eval` on a spec-supplied string executes arbitrary
# code; acceptable only if the specification file is trusted input.
34 def generate_tables(spec, data):
35 """Generate all tables specified in the specification file.
37 :param spec: Specification read from the specification file.
38 :param data: Data to process.
39 :type spec: Specification
43 logging.info("Generating the tables ...")
44 for table in spec.tables:
46 eval(table["algorithm"])(table, data)
47 except NameError as err:
48 logging.error("Probably algorithm '{alg}' is not defined: {err}".
49 format(alg=table["algorithm"], err=repr(err)))
# Writes one CSV file per suite with the detailed test results of a
# single (job, build) pair taken from table["data"].
# NOTE(review): listing has gaps — the initializations of `header`,
# `table_lst` and `row_lst`, plus several `try/except/else` lines, are
# missing; numeric line prefixes are extraction artifacts.
53 def table_details(table, input_data):
54 """Generate the table(s) with algorithm: table_detailed_test_results
55 specified in the specification file.
57 :param table: Table to generate.
58 :param input_data: Data to process.
59 :type table: pandas.Series
60 :type input_data: InputData
63 logging.info("  Generating the table {0} ...".
64 format(table.get("title", "")))
67 logging.info("    Creating the data set for the {0} '{1}'.".
68 format(table.get("type", ""), table.get("title", "")))
69 data = input_data.filter_data(table)
71 # Prepare the header of the tables
# Column titles are CSV-escaped by doubling embedded double quotes.
73 for column in table["columns"]:
74 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
76 # Generate the data for the table according to the model in the table
# Python-2 idiom: dict.keys()[0] picks the single job; only the first
# build listed for that job is used.
78 job = table["data"].keys()[0]
79 build = str(table["data"][job][0])
81 suites = input_data.suites(job, build)
83 logging.error("  No data available. The table will not be generated.")
86 for suite_longname, suite in suites.iteritems():
88 suite_name = suite["name"]
90 for test in data[job][build].keys():
91 if data[job][build][test]["parent"] in suite_name:
93 for column in table["columns"]:
# column["data"] is presumably of the form "<source> <key>"; the key
# after the space selects the test-data field — TODO confirm against
# the specification format.
95 col_data = str(data[job][build][test][column["data"].
96 split(" ")[1]]).replace('"', '""')
97 if column["data"].split(" ")[1] in ("vat-history",
# Multi-line fields: strip " |br| " markers and wrap the text in
# |prein| / |preout| so downstream rST renders it preformatted.
99 col_data = replace(col_data, " |br| ", "",
101 col_data = " |prein| {0} |preout| ".\
102 format(col_data[:-5])
103 row_lst.append('"{0}"'.format(col_data))
105 row_lst.append("No data")
106 table_lst.append(row_lst)
108 # Write the data to file
# One output file per suite: <output-file>_<suite_name><output-file-ext>.
110 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
111 table["output-file-ext"])
112 logging.info("      Writing file: '{}'".format(file_name))
113 with open(file_name, "w") as file_handler:
114 file_handler.write(",".join(header) + "\n")
115 for item in table_lst:
116 file_handler.write(",".join(item) + "\n")
118 logging.info("  Done.")
# Same per-suite CSV writer as table_details, but operating on data
# merged across builds (input_data.merge_data) instead of a single
# (job, build) pair.
# NOTE(review): listing has gaps — `header`, `table_lst`, `row_lst`
# initializations and try/except/else scaffolding are missing; numeric
# prefixes are extraction artifacts.
121 def table_merged_details(table, input_data):
122 """Generate the table(s) with algorithm: table_merged_details
123 specified in the specification file.
125 :param table: Table to generate.
126 :param input_data: Data to process.
127 :type table: pandas.Series
128 :type input_data: InputData
131 logging.info("  Generating the table {0} ...".
132 format(table.get("title", "")))
135 logging.info("    Creating the data set for the {0} '{1}'.".
136 format(table.get("type", ""), table.get("title", "")))
137 data = input_data.filter_data(table)
138 data = input_data.merge_data(data)
139 data.sort_index(inplace=True)
141 logging.info("    Creating the data set for the {0} '{1}'.".
142 format(table.get("type", ""), table.get("title", "")))
143 suites = input_data.filter_data(table, data_set="suites")
144 suites = input_data.merge_data(suites)
146 # Prepare the header of the tables
# CSV-escape embedded double quotes in column titles.
148 for column in table["columns"]:
149 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
151 for _, suite in suites.iteritems():
153 suite_name = suite["name"]
155 for test in data.keys():
156 if data[test]["parent"] in suite_name:
158 for column in table["columns"]:
# See table_details: key after the space in column["data"] selects the
# field — presumably "<source> <key>" format; TODO confirm.
160 col_data = str(data[test][column["data"].
161 split(" ")[1]]).replace('"', '""')
162 if column["data"].split(" ")[1] in ("vat-history",
164 col_data = replace(col_data, " |br| ", "",
166 col_data = " |prein| {0} |preout| ".\
167 format(col_data[:-5])
168 row_lst.append('"{0}"'.format(col_data))
170 row_lst.append("No data")
171 table_lst.append(row_lst)
173 # Write the data to file
175 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
176 table["output-file-ext"])
177 logging.info("      Writing file: '{}'".format(file_name))
178 with open(file_name, "w") as file_handler:
179 file_handler.write(",".join(header) + "\n")
180 for item in table_lst:
181 file_handler.write(",".join(item) + "\n")
183 logging.info("  Done.")
# Builds a CSV (and pretty-txt) comparing reference vs compare builds:
# per test, mean and stdev in Mpps for each side (plus optional
# "history" columns), and the relative change [%] between them.
# NOTE(review): heavily gapped listing — loop openers over `builds`,
# the `tbl_dict` / `tbl_lst` initializations, several try/except lines
# and `else:` branches are missing; numeric prefixes are artifacts.
186 def table_performance_comparison(table, input_data):
187 """Generate the table(s) with algorithm: table_performance_comparison
188 specified in the specification file.
190 :param table: Table to generate.
191 :param input_data: Data to process.
192 :type table: pandas.Series
193 :type input_data: InputData
196 logging.info("  Generating the table {0} ...".
197 format(table.get("title", "")))
200 logging.info("    Creating the data set for the {0} '{1}'.".
201 format(table.get("type", ""), table.get("title", "")))
202 data = input_data.filter_data(table, continue_on_error=True)
204 # Prepare the header of the tables
206 header = ["Test case", ]
# Column label depends on test kind: MRR reports receive rate,
# NDR/PDR report throughput.
208 if table["include-tests"] == "MRR":
209 hdr_param = "Receive Rate"
211 hdr_param = "Throughput"
213 history = table.get("history", None)
# Per history item: a mean column and a stdev column, both in Mpps.
217 ["{0} {1} [Mpps]".format(item["title"], hdr_param),
218 "{0} Stdev [Mpps]".format(item["title"])])
220 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
221 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
222 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
223 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
225 header_str = ",".join(header) + "\n"
226 except (AttributeError, KeyError) as err:
227 logging.error("The model is invalid, missing parameter: {0}".
231 # Prepare data to the table:
# Reference side: collect per-test samples keyed by a normalized test
# name (rate-type suffixes removed, thread/core tags collapsed to
# core-count only, e.g. "2t1c" -> "1c").
233 for job, builds in table["reference"]["data"].items():
235 for tst_name, tst_data in data[job][str(build)].iteritems():
236 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
237 replace("-ndrpdr", "").replace("-pdrdisc", "").\
238 replace("-ndrdisc", "").replace("-pdr", "").\
239 replace("-ndr", "").\
240 replace("1t1c", "1c").replace("2t1c", "1c").\
241 replace("2t2c", "2c").replace("4t2c", "2c").\
242 replace("4t4c", "4c").replace("8t4c", "4c")
243 if tbl_dict.get(tst_name_mod, None) is None:
244 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
245 "-".join(tst_data["name"].
247 if "comparison across testbeds" in table["title"].lower():
249 replace("1t1c", "1c").replace("2t1c", "1c").\
250 replace("2t2c", "2c").replace("4t2c", "2c").\
251 replace("4t4c", "4c").replace("8t4c", "4c")
252 tbl_dict[tst_name_mod] = {"name": name,
256 # TODO: Re-work when NDRPDRDISC tests are not used
# The sample appended depends on both the requested test kind
# (include-tests) and the stored result type (PDR/NDR vs NDRPDR).
257 if table["include-tests"] == "MRR":
258 tbl_dict[tst_name_mod]["ref-data"]. \
259 append(tst_data["result"]["receive-rate"].avg)
260 elif table["include-tests"] == "PDR":
261 if tst_data["type"] == "PDR":
262 tbl_dict[tst_name_mod]["ref-data"]. \
263 append(tst_data["throughput"]["value"])
264 elif tst_data["type"] == "NDRPDR":
265 tbl_dict[tst_name_mod]["ref-data"].append(
266 tst_data["throughput"]["PDR"]["LOWER"])
267 elif table["include-tests"] == "NDR":
268 if tst_data["type"] == "NDR":
269 tbl_dict[tst_name_mod]["ref-data"]. \
270 append(tst_data["throughput"]["value"])
271 elif tst_data["type"] == "NDRPDR":
272 tbl_dict[tst_name_mod]["ref-data"].append(
273 tst_data["throughput"]["NDR"]["LOWER"])
277 pass  # No data in output.xml for this test
# Compare side: same normalization and the same kind/type dispatch,
# appended to "cmp-data" instead of "ref-data".
279 for job, builds in table["compare"]["data"].items():
281 for tst_name, tst_data in data[job][str(build)].iteritems():
282 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
283 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
284 replace("-ndrdisc", "").replace("-pdr", ""). \
285 replace("-ndr", "").\
286 replace("1t1c", "1c").replace("2t1c", "1c").\
287 replace("2t2c", "2c").replace("4t2c", "2c").\
288 replace("4t4c", "4c").replace("8t4c", "4c")
290 # TODO: Re-work when NDRPDRDISC tests are not used
291 if table["include-tests"] == "MRR":
292 tbl_dict[tst_name_mod]["cmp-data"]. \
293 append(tst_data["result"]["receive-rate"].avg)
294 elif table["include-tests"] == "PDR":
295 if tst_data["type"] == "PDR":
296 tbl_dict[tst_name_mod]["cmp-data"]. \
297 append(tst_data["throughput"]["value"])
298 elif tst_data["type"] == "NDRPDR":
299 tbl_dict[tst_name_mod]["cmp-data"].append(
300 tst_data["throughput"]["PDR"]["LOWER"])
301 elif table["include-tests"] == "NDR":
302 if tst_data["type"] == "NDR":
303 tbl_dict[tst_name_mod]["cmp-data"]. \
304 append(tst_data["throughput"]["value"])
305 elif tst_data["type"] == "NDRPDR":
306 tbl_dict[tst_name_mod]["cmp-data"].append(
307 tst_data["throughput"]["NDR"]["LOWER"])
# Tests present only on the reference side are dropped from the table.
313 tbl_dict.pop(tst_name_mod, None)
# Optional history datasets: same collection pattern, stored per
# history title under tbl_dict[...]["history"].
316 for job, builds in item["data"].items():
318 for tst_name, tst_data in data[job][str(build)].iteritems():
319 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
320 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
321 replace("-ndrdisc", "").replace("-pdr", ""). \
322 replace("-ndr", "").\
323 replace("1t1c", "1c").replace("2t1c", "1c").\
324 replace("2t2c", "2c").replace("4t2c", "2c").\
325 replace("4t4c", "4c").replace("8t4c", "4c")
326 if tbl_dict.get(tst_name_mod, None) is None:
328 if tbl_dict[tst_name_mod].get("history", None) is None:
329 tbl_dict[tst_name_mod]["history"] = OrderedDict()
330 if tbl_dict[tst_name_mod]["history"].get(item["title"],
332 tbl_dict[tst_name_mod]["history"][item["title"]] = \
335 # TODO: Re-work when NDRPDRDISC tests are not used
336 if table["include-tests"] == "MRR":
337 tbl_dict[tst_name_mod]["history"][item["title"
338 ]].append(tst_data["result"]["receive-rate"].
340 elif table["include-tests"] == "PDR":
341 if tst_data["type"] == "PDR":
342 tbl_dict[tst_name_mod]["history"][
344 append(tst_data["throughput"]["value"])
345 elif tst_data["type"] == "NDRPDR":
346 tbl_dict[tst_name_mod]["history"][item[
347 "title"]].append(tst_data["throughput"][
349 elif table["include-tests"] == "NDR":
350 if tst_data["type"] == "NDR":
351 tbl_dict[tst_name_mod]["history"][
353 append(tst_data["throughput"]["value"])
354 elif tst_data["type"] == "NDRPDR":
355 tbl_dict[tst_name_mod]["history"][item[
356 "title"]].append(tst_data["throughput"][
360 except (TypeError, KeyError):
# Assemble rows: mean/stdev pairs converted to Mpps (divide by 1e6);
# None fills columns with no data.
364 for tst_name in tbl_dict.keys():
365 item = [tbl_dict[tst_name]["name"], ]
367 if tbl_dict[tst_name].get("history", None) is not None:
368 for hist_data in tbl_dict[tst_name]["history"].values():
370 item.append(round(mean(hist_data) / 1000000, 2))
371 item.append(round(stdev(hist_data) / 1000000, 2))
373 item.extend([None, None])
375 item.extend([None, None])
376 data_t = tbl_dict[tst_name]["ref-data"]
378 item.append(round(mean(data_t) / 1000000, 2))
379 item.append(round(stdev(data_t) / 1000000, 2))
381 item.extend([None, None])
382 data_t = tbl_dict[tst_name]["cmp-data"]
384 item.append(round(mean(data_t) / 1000000, 2))
385 item.append(round(stdev(data_t) / 1000000, 2))
387 item.extend([None, None])
# item[-4] is the reference mean, item[-2] the compare mean; relative
# change is only computed when both exist and the reference is nonzero.
388 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
389 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
390 if len(item) == len(header):
393 # Sort the table according to the relative change
394 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
396 # Generate csv tables:
397 csv_file = "{0}.csv".format(table["output-file"])
398 with open(csv_file, "w") as file_handler:
399 file_handler.write(header_str)
401 file_handler.write(",".join([str(item) for item in test]) + "\n")
403 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
# Builds the MRR trending dashboard: per test, last trend value [Mpps],
# short- and long-term relative change [%], and regression/progression
# counts over the short-term window; output as CSV plus pretty-txt.
# NOTE(review): gapped listing — `tbl_dict`/`tbl_lst`/`tbl_sorted`
# initializations, several branches and the full header list are
# missing; numeric prefixes are extraction artifacts.
406 def table_performance_trending_dashboard(table, input_data):
407 """Generate the table(s) with algorithm:
408 table_performance_trending_dashboard
409 specified in the specification file.
411 :param table: Table to generate.
412 :param input_data: Data to process.
413 :type table: pandas.Series
414 :type input_data: InputData
417 logging.info("  Generating the table {0} ...".
418 format(table.get("title", "")))
421 logging.info("    Creating the data set for the {0} '{1}'.".
422 format(table.get("type", ""), table.get("title", "")))
423 data = input_data.filter_data(table, continue_on_error=True)
425 # Prepare the header of the tables
426 header = ["Test Case",
428 "Short-Term Change [%]",
429 "Long-Term Change [%]",
433 header_str = ",".join(header) + "\n"
435 # Prepare data to the table:
437 for job, builds in table["data"].items():
439 for tst_name, tst_data in data[job][str(build)].iteritems():
# Tests on the ignore-list are skipped entirely.
440 if tst_name.lower() in table["ignore-list"]:
442 if tbl_dict.get(tst_name, None) is None:
# NIC model is recovered from the parent suite name via REGEX_NIC and
# prepended to the displayed test name.
443 groups = re.search(REGEX_NIC, tst_data["parent"])
446 nic = groups.group(0)
447 tbl_dict[tst_name] = {
448 "name": "{0}-{1}".format(nic, tst_data["name"]),
449 "data": OrderedDict()}
451 tbl_dict[tst_name]["data"][str(build)] = \
452 tst_data["result"]["receive-rate"]
453 except (TypeError, KeyError):
454 pass  # No data in output.xml for this test
457 for tst_name in tbl_dict.keys():
458 data_t = tbl_dict[tst_name]["data"]
# classify_anomalies labels each sample ("regression"/"progression"/
# "normal") and returns the running trend averages.
462 classification_lst, avgs = classify_anomalies(data_t)
# Windows are clamped to the number of available samples.
464 win_size = min(len(data_t), table["window"])
465 long_win_size = min(len(data_t), table["long-trend-window"])
469 [x for x in avgs[-long_win_size:-win_size]
474 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Relative changes are NaN-guarded and guarded against division by 0.
476 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
477 rel_change_last = nan
479 rel_change_last = round(
480 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
482 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
483 rel_change_long = nan
485 rel_change_long = round(
486 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
488 if classification_lst:
489 if isnan(rel_change_last) and isnan(rel_change_long):
# Row layout: name, last avg [Mpps], short/long change, regression and
# progression counts within the short-term window ('-' for NaN).
492 [tbl_dict[tst_name]["name"],
493 '-' if isnan(last_avg) else
494 round(last_avg / 1000000, 2),
495 '-' if isnan(rel_change_last) else rel_change_last,
496 '-' if isnan(rel_change_long) else rel_change_long,
497 classification_lst[-win_size:].count("regression"),
498 classification_lst[-win_size:].count("progression")])
500 tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-key ordering: most regressions first (item[4]), then most
# progressions (item[5]), then by short-term change (item[2]).
503 for nrr in range(table["window"], -1, -1):
504 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
505 for nrp in range(table["window"], -1, -1):
506 tbl_out = [item for item in tbl_reg if item[5] == nrp]
507 tbl_out.sort(key=lambda rel: rel[2])
508 tbl_sorted.extend(tbl_out)
510 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
512 logging.info("    Writing file: '{0}'".format(file_name))
513 with open(file_name, "w") as file_handler:
514 file_handler.write(header_str)
515 for test in tbl_sorted:
516 file_handler.write(",".join([str(item) for item in test]) + '\n')
518 txt_file_name = "{0}.txt".format(table["output-file"])
519 logging.info("    Writing file: '{0}'".format(txt_file_name))
520 convert_csv_to_pretty_txt(file_name, txt_file_name)
# Maps substrings of a test-case name to the pieces of a trending-plot
# URL: plot file name, feature suffix, NIC, frame size and thread/core
# anchor; concatenated in the final return.
# NOTE(review): gapped listing — the initializations of `url`, `anchor`,
# `feature`, the assignments inside the nic/framesize/core branches and
# several `else:` arms are missing, so the exact URL format cannot be
# confirmed from this view; numeric prefixes are extraction artifacts.
523 def _generate_url(base, testbed, test_name):
524 """Generate URL to a trending plot from the name of the test case.
526 :param base: The base part of URL common to all test cases.
527 :param testbed: The testbed used for testing.
528 :param test_name: The name of the test case.
532 :returns: The URL to the plot with the trending data for the given test
# Branch order matters: earlier, more specific matches (e.g. link
# bonding, vts) win over the generic ip4/l2 buckets below.
542 if "lbdpdk" in test_name or "lbvpp" in test_name:
543 file_name = "link_bonding"
545 elif "114b" in test_name and "vhost" in test_name:
548 elif "testpmd" in test_name or "l3fwd" in test_name:
551 elif "memif" in test_name:
552 file_name = "container_memif"
555 elif "srv6" in test_name:
558 elif "vhost" in test_name:
559 if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
560 file_name = "vm_vhost_l2"
561 if "114b" in test_name:
563 elif "l2xcbase" in test_name:
564 feature = "-base-l2xc"
565 elif "l2bdbasemaclrn" in test_name:
566 feature = "-base-l2bd"
569 elif "ip4base" in test_name:
570 file_name = "vm_vhost_ip4"
573 elif "ipsec" in test_name:
575 feature = "-base-scale"
577 elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
578 file_name = "ip4_tunnels"
581 elif "ip4base" in test_name or "ip4scale" in test_name:
583 if "xl710" in test_name:
584 feature = "-base-scale-features"
585 elif "iacl" in test_name:
586 feature = "-features-iacl"
587 elif "oacl" in test_name:
588 feature = "-features-oacl"
589 elif "snat" in test_name or "cop" in test_name:
590 feature = "-features"
592 feature = "-base-scale"
594 elif "ip6base" in test_name or "ip6scale" in test_name:
596 feature = "-base-scale"
598 elif "l2xcbase" in test_name or "l2xcscale" in test_name \
599 or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
600 or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
602 if "macip" in test_name:
603 feature = "-features-macip"
604 elif "iacl" in test_name:
605 feature = "-features-iacl"
606 elif "oacl" in test_name:
607 feature = "-features-oacl"
609 feature = "-base-scale"
# NIC model selection — the assignments in these branches are missing
# from this listing; presumably each sets `nic` — TODO confirm.
611 if "x520" in test_name:
613 elif "x710" in test_name:
615 elif "xl710" in test_name:
617 elif "xxv710" in test_name:
# Frame-size selection — assignments likewise missing from this view.
623 if "64b" in test_name:
625 elif "78b" in test_name:
627 elif "imix" in test_name:
629 elif "9000b" in test_name:
631 elif "1518b" in test_name:
633 elif "114b" in test_name:
637 anchor += framesize + '-'
# Thread/core combination appended to the anchor fragment.
639 if "1t1c" in test_name:
641 elif "2t2c" in test_name:
643 elif "4t4c" in test_name:
645 elif "2t1c" in test_name:
647 elif "4t2c" in test_name:
649 elif "8t4c" in test_name:
652 return url + file_name + '-' + testbed + '-' + nic + framesize + feature + \
# Converts the dashboard CSV (table["input-file"]) into an HTML table
# embedded in an rST ".. raw:: html" block, color-coding rows by
# regression/progression counts and linking each test name to its
# trending plot via _generate_url.
# NOTE(review): gapped listing — the `return` after the testbed check,
# the try: openers, th/td text assignments and the regression branch of
# the color choice are missing; numeric prefixes are artifacts.
656 def table_performance_trending_dashboard_html(table, input_data):
657 """Generate the table(s) with algorithm:
658 table_performance_trending_dashboard_html specified in the specification
661 :param table: Table to generate.
662 :param input_data: Data to process.
664 :type input_data: InputData
667 testbed = table.get("testbed", None)
669 logging.error("The testbed is not defined for the table '{0}'.".
670 format(table.get("title", "")))
673 logging.info("  Generating the table {0} ...".
674 format(table.get("title", "")))
# Python-2 csv usage: file opened in binary mode 'rb'.
677 with open(table["input-file"], 'rb') as csv_file:
678 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
679 csv_lst = [item for item in csv_content]
681 logging.warning("The input file is not defined.")
683 except csv.Error as err:
684 logging.warning("Not possible to process the file '{0}'.\n{1}".
685 format(table["input-file"], err))
689 dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row from the first CSV line; first column left-aligned.
692 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
693 for idx, item in enumerate(csv_lst[0]):
694 alignment = "left" if idx == 0 else "center"
695 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Two shades per category so adjacent rows alternate.
699 colors = {"regression": ("#ffcccc", "#ff9999"),
700 "progression": ("#c6ecc6", "#9fdf9f"),
701 "normal": ("#e9f1fb", "#d4e4f7")}
702 for r_idx, row in enumerate(csv_lst[1:]):
706 color = "progression"
709 background = colors[color][r_idx % 2]
710 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
713 for c_idx, item in enumerate(row):
714 alignment = "left" if c_idx == 0 else "center"
715 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# First cell gets an <a> wrapping the test name with the trending URL.
718 url = _generate_url("../trending/", testbed, item)
719 ref = ET.SubElement(td, "a", attrib=dict(href=url))
724 with open(table["output-file"], 'w') as html_file:
725 logging.info("    Writing file: '{0}'".format(table["output-file"]))
726 html_file.write(".. raw:: html\n\n\t")
727 html_file.write(ET.tostring(dashboard))
728 html_file.write("\n\t<p><br><br></p>\n")
730 logging.warning("The output file is not defined.")
# Builds a CSV (and pretty-txt) of failed tests: per test, the number of
# failures within the configured window and the time / VPP build /
# CSIT job-build of the last failure.
# NOTE(review): gapped listing — `tbl_dict`/`tbl_lst`/`tbl_sorted`
# initializations, the fails counter, the status tuple element and the
# try: openers are missing; numeric prefixes are artifacts.
734 def table_failed_tests(table, input_data):
735 """Generate the table(s) with algorithm: table_failed_tests
736 specified in the specification file.
738 :param table: Table to generate.
739 :param input_data: Data to process.
740 :type table: pandas.Series
741 :type input_data: InputData
744 logging.info("  Generating the table {0} ...".
745 format(table.get("title", "")))
748 logging.info("    Creating the data set for the {0} '{1}'.".
749 format(table.get("type", ""), table.get("title", "")))
750 data = input_data.filter_data(table, continue_on_error=True)
752 # Prepare the header of the tables
753 header = ["Test Case",
755 "Last Failure [Time]",
756 "Last Failure [VPP-Build-Id]",
757 "Last Failure [CSIT-Job-Build-Id]"]
759 # Generate the data for the table according to the model in the table
762 for job, builds in table["data"].items():
765 for tst_name, tst_data in data[job][build].iteritems():
766 if tst_name.lower() in table["ignore-list"]:
768 if tbl_dict.get(tst_name, None) is None:
# NIC model extracted from the parent suite name (REGEX_NIC) and
# prepended to the displayed test name.
769 groups = re.search(REGEX_NIC, tst_data["parent"])
772 nic = groups.group(0)
773 tbl_dict[tst_name] = {
774 "name": "{0}-{1}".format(nic, tst_data["name"]),
775 "data": OrderedDict()}
# Per-build record: (status?, generated time, VPP version, ...) —
# the first tuple element is missing from this listing; presumably the
# test status used to count failures below — TODO confirm.
777 tbl_dict[tst_name]["data"][build] = (
779 input_data.metadata(job, build).get("generated", ""),
780 input_data.metadata(job, build).get("version", ""),
782 except (TypeError, KeyError):
783 pass  # No data in output.xml for this test
786 for tst_data in tbl_dict.values():
787 win_size = min(len(tst_data["data"]), table["window"])
# Walk the newest `win_size` records, remembering the latest failure's
# date, VPP build and CSIT build.
789 for val in tst_data["data"].values()[-win_size:]:
792 fails_last_date = val[1]
793 fails_last_vpp = val[2]
794 fails_last_csit = val[3]
796 tbl_lst.append([tst_data["name"],
800 "mrr-daily-build-{0}".format(fails_last_csit)])
802 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
# Secondary ordering: rows grouped by failure count, highest first.
804 for nrf in range(table["window"], -1, -1):
805 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
806 tbl_sorted.extend(tbl_fails)
807 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
809 logging.info("    Writing file: '{0}'".format(file_name))
810 with open(file_name, "w") as file_handler:
811 file_handler.write(",".join(header) + "\n")
812 for test in tbl_sorted:
813 file_handler.write(",".join([str(item) for item in test]) + '\n')
815 txt_file_name = "{0}.txt".format(table["output-file"])
816 logging.info("    Writing file: '{0}'".format(txt_file_name))
817 convert_csv_to_pretty_txt(file_name, txt_file_name)
# Converts the failed-tests CSV (table["input-file"]) into an HTML
# table in an rST ".. raw:: html" block, with alternating row shading
# and trending-plot links on the test names — structurally parallel to
# table_performance_trending_dashboard_html but without per-category
# color coding.
# NOTE(review): gapped listing — the `return` after the testbed check,
# try: openers and th/td text assignments are missing; numeric prefixes
# are extraction artifacts.
820 def table_failed_tests_html(table, input_data):
821 """Generate the table(s) with algorithm: table_failed_tests_html
822 specified in the specification file.
824 :param table: Table to generate.
825 :param input_data: Data to process.
826 :type table: pandas.Series
827 :type input_data: InputData
830 testbed = table.get("testbed", None)
832 logging.error("The testbed is not defined for the table '{0}'.".
833 format(table.get("title", "")))
836 logging.info("  Generating the table {0} ...".
837 format(table.get("title", "")))
# Python-2 csv usage: file opened in binary mode 'rb'.
840 with open(table["input-file"], 'rb') as csv_file:
841 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
842 csv_lst = [item for item in csv_content]
844 logging.warning("The input file is not defined.")
846 except csv.Error as err:
847 logging.warning("Not possible to process the file '{0}'.\n{1}".
848 format(table["input-file"], err))
852 failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row from the first CSV line; first column left-aligned.
855 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
856 for idx, item in enumerate(csv_lst[0]):
857 alignment = "left" if idx == 0 else "center"
858 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Two shades alternated by row index for readability.
862 colors = ("#e9f1fb", "#d4e4f7")
863 for r_idx, row in enumerate(csv_lst[1:]):
864 background = colors[r_idx % 2]
865 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
868 for c_idx, item in enumerate(row):
869 alignment = "left" if c_idx == 0 else "center"
870 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# First cell gets an <a> wrapping the test name with the trending URL.
873 url = _generate_url("../trending/", testbed, item)
874 ref = ET.SubElement(td, "a", attrib=dict(href=url))
879 with open(table["output-file"], 'w') as html_file:
880 logging.info("    Writing file: '{0}'".format(table["output-file"]))
881 html_file.write(".. raw:: html\n\n\t")
882 html_file.write(ET.tostring(failed_tests))
883 html_file.write("\n\t<p><br><br></p>\n")
885 logging.warning("The output file is not defined.")