1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
27 from utils import mean, stdev, relative_change, classify_anomalies, \
28 convert_csv_to_pretty_txt
31 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table entry names its generator function in ``table["algorithm"]``;
    the named function is looked up and called with (table, data).

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        try:
            # SECURITY NOTE: eval() executes a name taken from the
            # specification file. This is acceptable only because the
            # specification is trusted, operator-provided input; do not
            # feed untrusted data through "algorithm".
            eval(table["algorithm"])(table, data)
        except NameError as err:
            # The algorithm name did not resolve to a function in this module.
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
# NOTE(review): this excerpt has elided lines (embedded original line numbers
# are non-contiguous) — do not assume the body below is complete.
# Python 2 only idioms: dict.keys()[0] indexing, iteritems(), and the
# string.replace() free function (removed in Python 3).
53 def table_details(table, input_data):
54 """Generate the table(s) with algorithm: table_detailed_test_results
55 specified in the specification file.
57 :param table: Table to generate.
58 :param input_data: Data to process.
59 :type table: pandas.Series
60 :type input_data: InputData
63 logging.info(" Generating the table {0} ...".
64 format(table.get("title", "")))
67 logging.info(" Creating the data set for the {0} '{1}'.".
68 format(table.get("type", ""), table.get("title", "")))
69 data = input_data.filter_data(table)
71 # Prepare the header of the tables
73 for column in table["columns"]:
# CSV-style quoting: embedded double quotes in titles are doubled.
74 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
76 # Generate the data for the table according to the model in the table
# Only the first job and its first build are used for this table.
78 job = table["data"].keys()[0]
79 build = str(table["data"][job][0])
81 suites = input_data.suites(job, build)
83 logging.error(" No data available. The table will not be generated.")
86 for suite_longname, suite in suites.iteritems():
88 suite_name = suite["name"]
90 for test in data[job][build].keys():
91 if data[job][build][test]["parent"] in suite_name:
93 for column in table["columns"]:
# column["data"] is a space-separated spec string; the second
# token selects the field of the test record to export.
95 col_data = str(data[job][build][test][column["data"].
96 split(" ")[1]]).replace('"', '""')
97 if column["data"].split(" ")[1] in ("vat-history",
# " |br| " markers are stripped and the cell wrapped in
# |prein| / |preout| for preformatted reST rendering.
99 col_data = replace(col_data, " |br| ", "",
101 col_data = " |prein| {0} |preout| ".\
102 format(col_data[:-5])
103 row_lst.append('"{0}"'.format(col_data))
105 row_lst.append("No data")
106 table_lst.append(row_lst)
108 # Write the data to file
# One output CSV file is written per suite.
110 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
111 table["output-file-ext"])
112 logging.info(" Writing file: '{}'".format(file_name))
113 with open(file_name, "w") as file_handler:
114 file_handler.write(",".join(header) + "\n")
115 for item in table_lst:
116 file_handler.write(",".join(item) + "\n")
118 logging.info(" Done.")
# NOTE(review): excerpt with elided lines (embedded original line numbers are
# non-contiguous). Same row-building logic as table_details, but the test and
# suite data are first merged across jobs/builds via input_data.merge_data().
121 def table_merged_details(table, input_data):
122 """Generate the table(s) with algorithm: table_merged_details
123 specified in the specification file.
125 :param table: Table to generate.
126 :param input_data: Data to process.
127 :type table: pandas.Series
128 :type input_data: InputData
131 logging.info(" Generating the table {0} ...".
132 format(table.get("title", "")))
135 logging.info(" Creating the data set for the {0} '{1}'.".
136 format(table.get("type", ""), table.get("title", "")))
137 data = input_data.filter_data(table)
# Merge per-build test data into one flat mapping, sorted by key.
138 data = input_data.merge_data(data)
139 data.sort_index(inplace=True)
141 logging.info(" Creating the data set for the {0} '{1}'.".
142 format(table.get("type", ""), table.get("title", "")))
143 suites = input_data.filter_data(table, data_set="suites")
144 suites = input_data.merge_data(suites)
146 # Prepare the header of the tables
148 for column in table["columns"]:
# CSV-style quoting: embedded double quotes in titles are doubled.
149 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
# Python 2 only: iteritems(); string.replace() below is also Python 2 only.
151 for _, suite in suites.iteritems():
153 suite_name = suite["name"]
155 for test in data.keys():
156 if data[test]["parent"] in suite_name:
158 for column in table["columns"]:
# Second token of column["data"] selects the exported field.
160 col_data = str(data[test][column["data"].
161 split(" ")[1]]).replace('"', '""')
162 if column["data"].split(" ")[1] in ("vat-history",
164 col_data = replace(col_data, " |br| ", "",
166 col_data = " |prein| {0} |preout| ".\
167 format(col_data[:-5])
168 row_lst.append('"{0}"'.format(col_data))
170 row_lst.append("No data")
171 table_lst.append(row_lst)
173 # Write the data to file
# One output CSV file is written per suite.
175 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
176 table["output-file-ext"])
177 logging.info(" Writing file: '{}'".format(file_name))
178 with open(file_name, "w") as file_handler:
179 file_handler.write(",".join(header) + "\n")
180 for item in table_lst:
181 file_handler.write(",".join(item) + "\n")
183 logging.info(" Done.")
# NOTE(review): excerpt with elided lines (embedded original line numbers are
# non-contiguous); several try/except and else branches are not visible here.
# Builds a per-test dict with "ref-data" / "cmp-data" (and optional "history")
# sample lists, then emits one CSV row per test with mean/stdev in Mpps and a
# final relative-change column.
186 def table_performance_comparison(table, input_data):
187 """Generate the table(s) with algorithm: table_performance_comparison
188 specified in the specification file.
190 :param table: Table to generate.
191 :param input_data: Data to process.
192 :type table: pandas.Series
193 :type input_data: InputData
196 logging.info(" Generating the table {0} ...".
197 format(table.get("title", "")))
200 logging.info(" Creating the data set for the {0} '{1}'.".
201 format(table.get("type", ""), table.get("title", "")))
202 data = input_data.filter_data(table, continue_on_error=True)
204 # Prepare the header of the tables
206 header = ["Test case", ]
# Column caption depends on the test kind being compared.
208 if table["include-tests"] == "MRR":
209 hdr_param = "Receive Rate"
211 hdr_param = "Throughput"
213 history = table.get("history", None)
217 ["{0} {1} [Mpps]".format(item["title"], hdr_param),
218 "{0} Stdev [Mpps]".format(item["title"])])
220 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
221 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
222 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
223 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
225 header_str = ",".join(header) + "\n"
226 except (AttributeError, KeyError) as err:
227 logging.error("The model is invalid, missing parameter: {0}".
231 # Prepare data to the table:
# First pass: collect reference-set samples.
233 for job, builds in table["reference"]["data"].items():
235 for tst_name, tst_data in data[job][str(build)].iteritems():
# Normalize the test name: strip rate-search suffixes and map
# thread/core tags (e.g. 2t1c) to a plain core count (1c) so the
# same test from different runs/testbeds keys to one entry.
236 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
237 replace("-ndrpdr", "").replace("-pdrdisc", "").\
238 replace("-ndrdisc", "").replace("-pdr", "").\
239 replace("-ndr", "").\
240 replace("1t1c", "1c").replace("2t1c", "1c").\
241 replace("2t2c", "2c").replace("4t2c", "2c").\
242 replace("4t4c", "4c").replace("8t4c", "4c")
243 if tbl_dict.get(tst_name_mod, None) is None:
244 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
245 "-".join(tst_data["name"].
247 if "comparison across testbeds" in table["title"].lower():
249 replace("1t1c", "1c").replace("2t1c", "1c").\
250 replace("2t2c", "2c").replace("4t2c", "2c").\
251 replace("4t4c", "4c").replace("8t4c", "4c")
252 tbl_dict[tst_name_mod] = {"name": name,
256 # TODO: Re-work when NDRPDRDISC tests are not used
# Pick the sample according to the requested test kind; NDRPDR
# results carry both NDR and PDR lower bounds.
257 if table["include-tests"] == "MRR":
258 tbl_dict[tst_name_mod]["ref-data"]. \
259 append(tst_data["result"]["receive-rate"].avg)
260 elif table["include-tests"] == "PDR":
261 if tst_data["type"] == "PDR":
262 tbl_dict[tst_name_mod]["ref-data"]. \
263 append(tst_data["throughput"]["value"])
264 elif tst_data["type"] == "NDRPDR":
265 tbl_dict[tst_name_mod]["ref-data"].append(
266 tst_data["throughput"]["PDR"]["LOWER"])
267 elif table["include-tests"] == "NDR":
268 if tst_data["type"] == "NDR":
269 tbl_dict[tst_name_mod]["ref-data"]. \
270 append(tst_data["throughput"]["value"])
271 elif tst_data["type"] == "NDRPDR":
272 tbl_dict[tst_name_mod]["ref-data"].append(
273 tst_data["throughput"]["NDR"]["LOWER"])
277 pass # No data in output.xml for this test
# Second pass: collect compare-set samples, same normalization.
279 for job, builds in table["compare"]["data"].items():
281 for tst_name, tst_data in data[job][str(build)].iteritems():
282 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
283 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
284 replace("-ndrdisc", "").replace("-pdr", ""). \
285 replace("-ndr", "").\
286 replace("1t1c", "1c").replace("2t1c", "1c").\
287 replace("2t2c", "2c").replace("4t2c", "2c").\
288 replace("4t4c", "4c").replace("8t4c", "4c")
290 # TODO: Re-work when NDRPDRDISC tests are not used
291 if table["include-tests"] == "MRR":
292 tbl_dict[tst_name_mod]["cmp-data"]. \
293 append(tst_data["result"]["receive-rate"].avg)
294 elif table["include-tests"] == "PDR":
295 if tst_data["type"] == "PDR":
296 tbl_dict[tst_name_mod]["cmp-data"]. \
297 append(tst_data["throughput"]["value"])
298 elif tst_data["type"] == "NDRPDR":
299 tbl_dict[tst_name_mod]["cmp-data"].append(
300 tst_data["throughput"]["PDR"]["LOWER"])
301 elif table["include-tests"] == "NDR":
302 if tst_data["type"] == "NDR":
303 tbl_dict[tst_name_mod]["cmp-data"]. \
304 append(tst_data["throughput"]["value"])
305 elif tst_data["type"] == "NDRPDR":
306 tbl_dict[tst_name_mod]["cmp-data"].append(
307 tst_data["throughput"]["NDR"]["LOWER"])
# Tests without usable data are dropped from the table.
313 tbl_dict.pop(tst_name_mod, None)
# Optional third pass: historical columns, one per "history" item.
316 for job, builds in item["data"].items():
318 for tst_name, tst_data in data[job][str(build)].iteritems():
319 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
320 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
321 replace("-ndrdisc", "").replace("-pdr", ""). \
322 replace("-ndr", "").\
323 replace("1t1c", "1c").replace("2t1c", "1c").\
324 replace("2t2c", "2c").replace("4t2c", "2c").\
325 replace("4t4c", "4c").replace("8t4c", "4c")
326 if tbl_dict.get(tst_name_mod, None) is None:
328 if tbl_dict[tst_name_mod].get("history", None) is None:
329 tbl_dict[tst_name_mod]["history"] = OrderedDict()
330 if tbl_dict[tst_name_mod]["history"].get(item["title"],
332 tbl_dict[tst_name_mod]["history"][item["title"]] = \
335 # TODO: Re-work when NDRPDRDISC tests are not used
336 if table["include-tests"] == "MRR":
337 tbl_dict[tst_name_mod]["history"][item["title"
338 ]].append(tst_data["result"]["receive-rate"].
340 elif table["include-tests"] == "PDR":
341 if tst_data["type"] == "PDR":
342 tbl_dict[tst_name_mod]["history"][
344 append(tst_data["throughput"]["value"])
345 elif tst_data["type"] == "NDRPDR":
346 tbl_dict[tst_name_mod]["history"][item[
347 "title"]].append(tst_data["throughput"][
349 elif table["include-tests"] == "NDR":
350 if tst_data["type"] == "NDR":
351 tbl_dict[tst_name_mod]["history"][
353 append(tst_data["throughput"]["value"])
354 elif tst_data["type"] == "NDRPDR":
355 tbl_dict[tst_name_mod]["history"][item[
356 "title"]].append(tst_data["throughput"][
360 except (TypeError, KeyError):
# Build the output rows: samples are in pps, reported as Mpps (/1e6).
364 for tst_name in tbl_dict.keys():
365 item = [tbl_dict[tst_name]["name"], ]
367 if tbl_dict[tst_name].get("history", None) is not None:
368 for hist_data in tbl_dict[tst_name]["history"].values():
370 item.append(round(mean(hist_data) / 1000000, 2))
371 item.append(round(stdev(hist_data) / 1000000, 2))
373 item.extend([None, None])
375 item.extend([None, None])
376 data_t = tbl_dict[tst_name]["ref-data"]
378 item.append(round(mean(data_t) / 1000000, 2))
379 item.append(round(stdev(data_t) / 1000000, 2))
381 item.extend([None, None])
382 data_t = tbl_dict[tst_name]["cmp-data"]
384 item.append(round(mean(data_t) / 1000000, 2))
385 item.append(round(stdev(data_t) / 1000000, 2))
387 item.extend([None, None])
# Last column: relative change [%] between the reference mean
# (item[-4]) and the compare mean (item[-2]); skipped on missing
# or zero reference.
388 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
389 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
390 if len(item) == len(header):
393 # Sort the table according to the relative change
394 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
396 # Generate csv tables:
397 csv_file = "{0}.csv".format(table["output-file"])
398 with open(csv_file, "w") as file_handler:
399 file_handler.write(header_str)
401 file_handler.write(",".join([str(item) for item in test]) + "\n")
403 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
# NOTE(review): excerpt with elided lines (embedded original line numbers are
# non-contiguous). Aggregates MRR receive-rate trending data per test,
# classifies anomalies and emits a CSV + pretty-txt dashboard.
406 def table_performance_trending_dashboard(table, input_data):
407 """Generate the table(s) with algorithm:
408 table_performance_trending_dashboard
409 specified in the specification file.
411 :param table: Table to generate.
412 :param input_data: Data to process.
413 :type table: pandas.Series
414 :type input_data: InputData
417 logging.info(" Generating the table {0} ...".
418 format(table.get("title", "")))
421 logging.info(" Creating the data set for the {0} '{1}'.".
422 format(table.get("type", ""), table.get("title", "")))
423 data = input_data.filter_data(table, continue_on_error=True)
425 # Prepare the header of the tables
426 header = ["Test Case",
428 "Short-Term Change [%]",
429 "Long-Term Change [%]",
433 header_str = ",".join(header) + "\n"
435 # Prepare data to the table:
437 for job, builds in table["data"].items():
# Python 2 only: iteritems().
439 for tst_name, tst_data in data[job][str(build)].iteritems():
# Tests listed in the spec's ignore-list are skipped.
440 if tst_name.lower() in table["ignore-list"]:
442 if tbl_dict.get(tst_name, None) is None:
# The NIC model is extracted from the parent suite name and
# prefixed to the displayed test name.
443 groups = re.search(REGEX_NIC, tst_data["parent"])
446 nic = groups.group(0)
447 tbl_dict[tst_name] = {
448 "name": "{0}-{1}".format(nic, tst_data["name"]),
449 "data": OrderedDict()}
451 tbl_dict[tst_name]["data"][str(build)] = \
452 tst_data["result"]["receive-rate"]
453 except (TypeError, KeyError):
454 pass # No data in output.xml for this test
457 for tst_name in tbl_dict.keys():
458 data_t = tbl_dict[tst_name]["data"]
# classify_anomalies tags each sample (e.g. "regression" /
# "progression") and returns per-sample trend averages.
462 classification_lst, avgs = classify_anomalies(data_t)
464 win_size = min(len(data_t), table["window"])
465 long_win_size = min(len(data_t), table["long-trend-window"])
469 [x for x in avgs[-long_win_size:-win_size]
474 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Guard against NaN / zero baselines before computing % change.
476 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
477 rel_change_last = nan
479 rel_change_last = round(
480 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
482 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
483 rel_change_long = nan
485 rel_change_long = round(
486 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
488 if classification_lst:
489 if isnan(rel_change_last) and isnan(rel_change_long):
# Row: name, last avg [Mpps], short/long-term change [%],
# regression and progression counts within the short window.
492 [tbl_dict[tst_name]["name"],
493 '-' if isnan(last_avg) else
494 round(last_avg / 1000000, 2),
495 '-' if isnan(rel_change_last) else rel_change_last,
496 '-' if isnan(rel_change_long) else rel_change_long,
497 classification_lst[-win_size:].count("regression"),
498 classification_lst[-win_size:].count("progression")])
500 tbl_lst.sort(key=lambda rel: rel[0])
# Final ordering: regression count desc, then progression count desc
# (columns 4 and 5), then by short-term change (column 2).
503 for nrr in range(table["window"], -1, -1):
504 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
505 for nrp in range(table["window"], -1, -1):
506 tbl_out = [item for item in tbl_reg if item[5] == nrp]
507 tbl_out.sort(key=lambda rel: rel[2])
508 tbl_sorted.extend(tbl_out)
510 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
512 logging.info(" Writing file: '{0}'".format(file_name))
513 with open(file_name, "w") as file_handler:
514 file_handler.write(header_str)
515 for test in tbl_sorted:
516 file_handler.write(",".join([str(item) for item in test]) + '\n')
518 txt_file_name = "{0}.txt".format(table["output-file"])
519 logging.info(" Writing file: '{0}'".format(txt_file_name))
520 convert_csv_to_pretty_txt(file_name, txt_file_name)
# NOTE(review): excerpt with elided lines (embedded original line numbers are
# non-contiguous) — most branch bodies (file_name / nic / framesize / cores
# assignments) are not visible here; do not assume the mapping is complete.
# Maps substrings of the test name to the trending-page file name, NIC tag,
# frame size and core-count anchor, then assembles the plot URL.
523 def _generate_url(base, testbed, test_name):
524 """Generate URL to a trending plot from the name of the test case.
526 :param base: The base part of URL common to all test cases.
527 :param testbed: The testbed used for testing.
528 :param test_name: The name of the test case.
532 :returns: The URL to the plot with the trending data for the given test
# Test-type dispatch: first matching substring wins (elif chain).
542 if "lbdpdk" in test_name or "lbvpp" in test_name:
543 file_name = "link_bonding"
545 elif "testpmd" in test_name or "l3fwd" in test_name:
548 elif "memif" in test_name:
549 file_name = "container_memif"
552 elif "srv6" in test_name:
555 elif "vhost" in test_name:
556 if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
557 file_name = "vm_vhost_l2"
558 elif "ip4base" in test_name:
559 file_name = "vm_vhost_ip4"
562 elif "ipsec" in test_name:
564 feature = "-base-scale"
566 elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
567 file_name = "ip4_tunnels"
570 elif "ip4base" in test_name or "ip4scale" in test_name:
572 if "xl710" in test_name:
573 feature = "-base-scale-features"
574 elif "acl" in test_name or "snat" in test_name or "cop" in test_name:
575 feature = "-features"
577 feature = "-base-scale"
579 elif "ip6base" in test_name or "ip6scale" in test_name:
581 feature = "-base-scale"
583 elif "l2xcbase" in test_name or "l2xcscale" in test_name \
584 or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
585 or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
587 if "acl" in test_name:
588 feature = "-features"
590 feature = "-base-scale"
# NIC model selection (assignment bodies elided in this excerpt).
592 if "x520" in test_name:
594 elif "x710" in test_name:
596 elif "xl710" in test_name:
598 elif "xxv710" in test_name:
# Frame-size selection (assignment bodies elided in this excerpt).
604 if "64b" in test_name:
606 elif "78b" in test_name:
608 elif "imix" in test_name:
610 elif "9000b" in test_name:
612 elif "1518b" in test_name:
614 elif "114b" in test_name:
618 anchor += framesize + '-'
# Core-count anchor suffix (assignment bodies elided in this excerpt).
620 if "1t1c" in test_name:
622 elif "2t2c" in test_name:
624 elif "4t4c" in test_name:
626 elif "2t1c" in test_name:
628 elif "4t2c" in test_name:
630 elif "8t4c" in test_name:
# Final URL = base + file name + testbed + NIC + framesize + feature + anchor.
633 return url + file_name + '-' + testbed + '-' + nic + framesize + feature + \
# NOTE(review): excerpt with elided lines (embedded original line numbers are
# non-contiguous). Reads the CSV produced by the trending dashboard and
# renders it as an HTML table embedded in a reST ".. raw:: html" block.
637 def table_performance_trending_dashboard_html(table, input_data):
638 """Generate the table(s) with algorithm:
639 table_performance_trending_dashboard_html specified in the specification
642 :param table: Table to generate.
643 :param input_data: Data to process.
645 :type input_data: InputData
# The testbed name is required to build links to the trending plots.
648 testbed = table.get("testbed", None)
650 logging.error("The testbed is not defined for the table '{0}'.".
651 format(table.get("title", "")))
654 logging.info(" Generating the table {0} ...".
655 format(table.get("title", "")))
# 'rb' mode for csv.reader is a Python 2 convention.
658 with open(table["input-file"], 'rb') as csv_file:
659 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
660 csv_lst = [item for item in csv_content]
662 logging.warning("The input file is not defined.")
664 except csv.Error as err:
665 logging.warning("Not possible to process the file '{0}'.\n{1}".
666 format(table["input-file"], err))
670 dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row: first CSV row becomes <th> cells.
673 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
674 for idx, item in enumerate(csv_lst[0]):
675 alignment = "left" if idx == 0 else "center"
676 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Two shades per classification give alternating row striping.
680 colors = {"regression": ("#ffcccc", "#ff9999"),
681 "progression": ("#c6ecc6", "#9fdf9f"),
682 "normal": ("#e9f1fb", "#d4e4f7")}
683 for r_idx, row in enumerate(csv_lst[1:]):
# The condition choosing the classification is elided in this excerpt.
687 color = "progression"
690 background = colors[color][r_idx % 2]
691 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
694 for c_idx, item in enumerate(row):
695 alignment = "left" if c_idx == 0 else "center"
696 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# Presumably only the test-name cell is turned into a link; the
# guarding condition is elided here — verify against the full source.
699 url = _generate_url("../trending/", testbed, item)
700 ref = ET.SubElement(td, "a", attrib=dict(href=url))
705 with open(table["output-file"], 'w') as html_file:
706 logging.info(" Writing file: '{0}'".format(table["output-file"]))
707 html_file.write(".. raw:: html\n\n\t")
708 html_file.write(ET.tostring(dashboard))
709 html_file.write("\n\t<p><br><br></p>\n")
711 logging.warning("The output file is not defined.")
# NOTE(review): excerpt with elided lines (embedded original line numbers are
# non-contiguous). Counts per-test failures within a sliding window of builds
# and records metadata of the most recent failure.
715 def table_failed_tests(table, input_data):
716 """Generate the table(s) with algorithm: table_failed_tests
717 specified in the specification file.
719 :param table: Table to generate.
720 :param input_data: Data to process.
721 :type table: pandas.Series
722 :type input_data: InputData
725 logging.info(" Generating the table {0} ...".
726 format(table.get("title", "")))
729 logging.info(" Creating the data set for the {0} '{1}'.".
730 format(table.get("type", ""), table.get("title", "")))
731 data = input_data.filter_data(table, continue_on_error=True)
733 # Prepare the header of the tables
734 header = ["Test Case",
736 "Last Failure [Time]",
737 "Last Failure [VPP-Build-Id]",
738 "Last Failure [CSIT-Job-Build-Id]"]
740 # Generate the data for the table according to the model in the table
743 for job, builds in table["data"].items():
# Python 2 only: iteritems().
746 for tst_name, tst_data in data[job][build].iteritems():
747 if tst_name.lower() in table["ignore-list"]:
749 if tbl_dict.get(tst_name, None) is None:
# Display name is the NIC model (from the parent suite name)
# prefixed to the test name.
750 groups = re.search(REGEX_NIC, tst_data["parent"])
753 nic = groups.group(0)
754 tbl_dict[tst_name] = {
755 "name": "{0}-{1}".format(nic, tst_data["name"]),
756 "data": OrderedDict()}
# Per-build record: first tuple element is elided in this excerpt
# (presumably the test status), then build metadata — verify
# against the full source.
758 tbl_dict[tst_name]["data"][build] = (
760 input_data.metadata(job, build).get("generated", ""),
761 input_data.metadata(job, build).get("version", ""),
763 except (TypeError, KeyError):
764 pass # No data in output.xml for this test
767 for tst_data in tbl_dict.values():
768 win_size = min(len(tst_data["data"]), table["window"])
# Python 2 only: dict.values() returns a list, so slicing works.
770 for val in tst_data["data"].values()[-win_size:]:
# Each failed sample overwrites the "last failure" fields, so the
# final values belong to the newest failure in the window.
773 fails_last_date = val[1]
774 fails_last_vpp = val[2]
775 fails_last_csit = val[3]
777 tbl_lst.append([tst_data["name"],
781 "mrr-daily-build-{0}".format(fails_last_csit)])
783 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
# Group rows by failure count (column 1), highest first.
785 for nrf in range(table["window"], -1, -1):
786 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
787 tbl_sorted.extend(tbl_fails)
788 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
790 logging.info(" Writing file: '{0}'".format(file_name))
791 with open(file_name, "w") as file_handler:
792 file_handler.write(",".join(header) + "\n")
793 for test in tbl_sorted:
794 file_handler.write(",".join([str(item) for item in test]) + '\n')
796 txt_file_name = "{0}.txt".format(table["output-file"])
797 logging.info(" Writing file: '{0}'".format(txt_file_name))
798 convert_csv_to_pretty_txt(file_name, txt_file_name)
# NOTE(review): excerpt with elided lines (embedded original line numbers are
# non-contiguous). Reads the failed-tests CSV and renders it as an HTML table
# in a reST ".. raw:: html" block; simple alternating row colors (no
# per-classification coloring, unlike the dashboard HTML variant).
801 def table_failed_tests_html(table, input_data):
802 """Generate the table(s) with algorithm: table_failed_tests_html
803 specified in the specification file.
805 :param table: Table to generate.
806 :param input_data: Data to process.
807 :type table: pandas.Series
808 :type input_data: InputData
# The testbed name is required to build links to the trending plots.
811 testbed = table.get("testbed", None)
813 logging.error("The testbed is not defined for the table '{0}'.".
814 format(table.get("title", "")))
817 logging.info(" Generating the table {0} ...".
818 format(table.get("title", "")))
# 'rb' mode for csv.reader is a Python 2 convention.
821 with open(table["input-file"], 'rb') as csv_file:
822 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
823 csv_lst = [item for item in csv_content]
825 logging.warning("The input file is not defined.")
827 except csv.Error as err:
828 logging.warning("Not possible to process the file '{0}'.\n{1}".
829 format(table["input-file"], err))
833 failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row: first CSV row becomes <th> cells.
836 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
837 for idx, item in enumerate(csv_lst[0]):
838 alignment = "left" if idx == 0 else "center"
839 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Two shades alternate per data row.
843 colors = ("#e9f1fb", "#d4e4f7")
844 for r_idx, row in enumerate(csv_lst[1:]):
845 background = colors[r_idx % 2]
846 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
849 for c_idx, item in enumerate(row):
850 alignment = "left" if c_idx == 0 else "center"
851 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# Presumably only the test-name cell is linked; the guarding
# condition is elided here — verify against the full source.
854 url = _generate_url("../trending/", testbed, item)
855 ref = ET.SubElement(td, "a", attrib=dict(href=url))
860 with open(table["output-file"], 'w') as html_file:
861 logging.info(" Writing file: '{0}'".format(table["output-file"]))
862 html_file.write(".. raw:: html\n\n\t")
863 html_file.write(ET.tostring(failed_tests))
864 html_file.write("\n\t<p><br><br></p>\n")
866 logging.warning("The output file is not defined.")