1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
21 from string import replace
22 from collections import OrderedDict
23 from numpy import nan, isnan
24 from xml.etree import ElementTree as ET
26 from errors import PresentationError
27 from utils import mean, stdev, relative_change, classify_anomalies, \
28 convert_csv_to_pretty_txt
31 def generate_tables(spec, data):
32 """Generate all tables specified in the specification file.
34 :param spec: Specification read from the specification file.
35 :param data: Data to process.
36 :type spec: Specification
40 logging.info("Generating the tables ...")
# Dispatch: each table spec names its generator function as a string in
# table["algorithm"]; eval() resolves that name to a function defined in
# this module and calls it with (table, data).
# NOTE(review): eval on a spec-supplied string executes arbitrary code —
# acceptable only because the specification file is trusted input; a
# lookup such as globals()[name] would be safer. Confirm before changing.
41 for table in spec.tables:
43 eval(table["algorithm"])(table, data)
# An unknown or misspelled algorithm name surfaces as NameError; it is
# logged and the remaining tables are still generated (no abort).
44 except NameError as err:
45 logging.error("Probably algorithm '{alg}' is not defined: {err}".
46 format(alg=table["algorithm"], err=repr(err)))
50 def table_details(table, input_data):
51 """Generate the table(s) with algorithm: table_detailed_test_results
52 specified in the specification file.
54 :param table: Table to generate.
55 :param input_data: Data to process.
56 :type table: pandas.Series
57 :type input_data: InputData
60 logging.info("  Generating the table {0} ...".
61 format(table.get("title", "")))
64 logging.info("    Creating the data set for the {0} '{1}'.".
65 format(table.get("type", ""), table.get("title", "")))
66 data = input_data.filter_data(table)
68 # Prepare the header of the tables
# Column titles are CSV-quoted; embedded double quotes are escaped by
# doubling them (RFC 4180 style).
70 for column in table["columns"]:
71 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
73 # Generate the data for the table according to the model in the table
# Only the first job and its first build are used.
# NOTE(review): dict.keys()[0] and iteritems() below are Python 2 only.
75 job = table["data"].keys()[0]
76 build = str(table["data"][job][0])
78 suites = input_data.suites(job, build)
80 logging.error("    No data available. The table will not be generated.")
83 for suite_longname, suite in suites.iteritems():
85 suite_name = suite["name"]
87 for test in data[job][build].keys():
# A test belongs to this suite when its "parent" string is contained
# in the suite name (substring test, not equality).
88 if data[job][build][test]["parent"] in suite_name:
90 for column in table["columns"]:
# column["data"] looks like "<source> <field>"; the word after
# the first space selects the field of the test record.
92 col_data = str(data[job][build][test][column["data"].
93 split(" ")[1]]).replace('"', '""')
94 if column["data"].split(" ")[1] in ("vat-history",
# For history-style fields: drop rST " |br| " line-break markers
# (string.replace is Python 2 only) and wrap the text in
# |prein| ... |preout| so it renders preformatted.
96 col_data = replace(col_data, " |br| ", "",
98 col_data = " |prein| {0} |preout| ".\
100 row_lst.append('"{0}"'.format(col_data))
102 row_lst.append("No data")
103 table_lst.append(row_lst)
105 # Write the data to file
# One CSV file per suite: <output-file>_<suite name><output-file-ext>.
107 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
108 table["output-file-ext"])
109 logging.info("      Writing file: '{}'".format(file_name))
110 with open(file_name, "w") as file_handler:
111 file_handler.write(",".join(header) + "\n")
112 for item in table_lst:
113 file_handler.write(",".join(item) + "\n")
115 logging.info("  Done.")
118 def table_merged_details(table, input_data):
119 """Generate the table(s) with algorithm: table_merged_details
120 specified in the specification file.
122 :param table: Table to generate.
123 :param input_data: Data to process.
124 :type table: pandas.Series
125 :type input_data: InputData
128 logging.info("  Generating the table {0} ...".
129 format(table.get("title", "")))
132 logging.info("    Creating the data set for the {0} '{1}'.".
133 format(table.get("type", ""), table.get("title", "")))
# Unlike table_details, the filtered data from all jobs/builds is merged
# into one flat mapping keyed by test, then sorted for stable output.
134 data = input_data.filter_data(table)
135 data = input_data.merge_data(data)
136 data.sort_index(inplace=True)
138 logging.info("    Creating the data set for the {0} '{1}'.".
139 format(table.get("type", ""), table.get("title", "")))
140 suites = input_data.filter_data(table, data_set="suites")
141 suites = input_data.merge_data(suites)
143 # Prepare the header of the tables
# Column titles are CSV-quoted; embedded double quotes are doubled.
145 for column in table["columns"]:
146 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
148 for _, suite in suites.iteritems():
150 suite_name = suite["name"]
152 for test in data.keys():
# Substring match: the test's "parent" must appear in the suite name.
153 if data[test]["parent"] in suite_name:
155 for column in table["columns"]:
# The word after the first space in column["data"] selects the
# field of the merged test record.
157 col_data = str(data[test][column["data"].
158 split(" ")[1]]).replace('"', '""')
159 if column["data"].split(" ")[1] in ("vat-history",
# Strip rST " |br| " markers (Python 2 string.replace) and wrap
# in |prein| ... |preout| for preformatted rendering.
161 col_data = replace(col_data, " |br| ", "",
163 col_data = " |prein| {0} |preout| ".\
164 format(col_data[:-5])
165 row_lst.append('"{0}"'.format(col_data))
167 row_lst.append("No data")
168 table_lst.append(row_lst)
170 # Write the data to file
# One CSV file per suite: <output-file>_<suite name><output-file-ext>.
172 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
173 table["output-file-ext"])
174 logging.info("      Writing file: '{}'".format(file_name))
175 with open(file_name, "w") as file_handler:
176 file_handler.write(",".join(header) + "\n")
177 for item in table_lst:
178 file_handler.write(",".join(item) + "\n")
180 logging.info("  Done.")
183 def table_performance_comparison(table, input_data):
184 """Generate the table(s) with algorithm: table_performance_comparison
185 specified in the specification file.
187 :param table: Table to generate.
188 :param input_data: Data to process.
189 :type table: pandas.Series
190 :type input_data: InputData
193 logging.info("  Generating the table {0} ...".
194 format(table.get("title", "")))
197 logging.info("    Creating the data set for the {0} '{1}'.".
198 format(table.get("type", ""), table.get("title", "")))
199 data = input_data.filter_data(table, continue_on_error=True)
201 # Prepare the header of the tables
203 header = ["Test case", ]
# The metric label depends on the test kind: MRR reports receive rate,
# NDR/PDR report throughput. Both are shown in Mpps.
205 if table["include-tests"] == "MRR":
206 hdr_param = "Receive Rate"
208 hdr_param = "Throughput"
# Optional "history" entries prepend extra (mean, stdev) column pairs,
# one pair per historical release, before the reference/compare columns.
210 history = table.get("history", None)
214 ["{0} {1} [Mpps]".format(item["title"], hdr_param),
215 "{0} Stdev [Mpps]".format(item["title"])])
217 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
218 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
219 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
220 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
222 header_str = ",".join(header) + "\n"
223 except (AttributeError, KeyError) as err:
224 logging.error("The model is invalid, missing parameter: {0}".
228 # Prepare data to the table:
# Pass 1: collect "reference" samples. Test names are normalised so the
# same test measured with different rate-search methods (-ndrpdr, -pdr,
# -ndr, ...) and different thread/core tags (1t1c/2t1c -> 1c, ...) maps
# to one key and can be compared across builds.
230 for job, builds in table["reference"]["data"].items():
232 for tst_name, tst_data in data[job][str(build)].iteritems():
233 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
234 replace("-ndrpdr", "").replace("-pdrdisc", "").\
235 replace("-ndrdisc", "").replace("-pdr", "").\
236 replace("-ndr", "").\
237 replace("1t1c", "1c").replace("2t1c", "1c").\
238 replace("2t2c", "2c").replace("4t2c", "2c").\
239 replace("4t4c", "4c").replace("8t4c", "4c")
240 if tbl_dict.get(tst_name_mod, None) is None:
241 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
242 "-".join(tst_data["name"].
244 if "comparison across testbeds" in table["title"].lower():
246 replace("1t1c", "1c").replace("2t1c", "1c").\
247 replace("2t2c", "2c").replace("4t2c", "2c").\
248 replace("4t4c", "4c").replace("8t4c", "4c")
249 tbl_dict[tst_name_mod] = {"name": name,
253 # TODO: Re-work when NDRPDRDISC tests are not used
# Pick the sample value by test kind: MRR -> average receive rate;
# PDR/NDR -> measured throughput, taking the LOWER bound for the
# combined NDRPDR result type.
254 if table["include-tests"] == "MRR":
255 tbl_dict[tst_name_mod]["ref-data"]. \
256 append(tst_data["result"]["receive-rate"].avg)
257 elif table["include-tests"] == "PDR":
258 if tst_data["type"] == "PDR":
259 tbl_dict[tst_name_mod]["ref-data"]. \
260 append(tst_data["throughput"]["value"])
261 elif tst_data["type"] == "NDRPDR":
262 tbl_dict[tst_name_mod]["ref-data"].append(
263 tst_data["throughput"]["PDR"]["LOWER"])
264 elif table["include-tests"] == "NDR":
265 if tst_data["type"] == "NDR":
266 tbl_dict[tst_name_mod]["ref-data"]. \
267 append(tst_data["throughput"]["value"])
268 elif tst_data["type"] == "NDRPDR":
269 tbl_dict[tst_name_mod]["ref-data"].append(
270 tst_data["throughput"]["NDR"]["LOWER"])
274 pass  # No data in output.xml for this test
# Pass 2: collect "compare" samples with the same name normalisation and
# the same per-kind value selection as above.
276 for job, builds in table["compare"]["data"].items():
278 for tst_name, tst_data in data[job][str(build)].iteritems():
279 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
280 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
281 replace("-ndrdisc", "").replace("-pdr", ""). \
282 replace("-ndr", "").\
283 replace("1t1c", "1c").replace("2t1c", "1c").\
284 replace("2t2c", "2c").replace("4t2c", "2c").\
285 replace("4t4c", "4c").replace("8t4c", "4c")
287 # TODO: Re-work when NDRPDRDISC tests are not used
288 if table["include-tests"] == "MRR":
289 tbl_dict[tst_name_mod]["cmp-data"]. \
290 append(tst_data["result"]["receive-rate"].avg)
291 elif table["include-tests"] == "PDR":
292 if tst_data["type"] == "PDR":
293 tbl_dict[tst_name_mod]["cmp-data"]. \
294 append(tst_data["throughput"]["value"])
295 elif tst_data["type"] == "NDRPDR":
296 tbl_dict[tst_name_mod]["cmp-data"].append(
297 tst_data["throughput"]["PDR"]["LOWER"])
298 elif table["include-tests"] == "NDR":
299 if tst_data["type"] == "NDR":
300 tbl_dict[tst_name_mod]["cmp-data"]. \
301 append(tst_data["throughput"]["value"])
302 elif tst_data["type"] == "NDRPDR":
303 tbl_dict[tst_name_mod]["cmp-data"].append(
304 tst_data["throughput"]["NDR"]["LOWER"])
# A test with no usable compare data is dropped from the table entirely.
310 tbl_dict.pop(tst_name_mod, None)
# Pass 3 (optional): collect the historical releases' samples into the
# per-test "history" OrderedDict keyed by the release title.
313 for job, builds in item["data"].items():
315 for tst_name, tst_data in data[job][str(build)].iteritems():
316 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
317 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
318 replace("-ndrdisc", "").replace("-pdr", ""). \
319 replace("-ndr", "").\
320 replace("1t1c", "1c").replace("2t1c", "1c").\
321 replace("2t2c", "2c").replace("4t2c", "2c").\
322 replace("4t4c", "4c").replace("8t4c", "4c")
323 if tbl_dict.get(tst_name_mod, None) is None:
325 if tbl_dict[tst_name_mod].get("history", None) is None:
326 tbl_dict[tst_name_mod]["history"] = OrderedDict()
327 if tbl_dict[tst_name_mod]["history"].get(item["title"],
329 tbl_dict[tst_name_mod]["history"][item["title"]] = \
332 # TODO: Re-work when NDRPDRDISC tests are not used
333 if table["include-tests"] == "MRR":
334 tbl_dict[tst_name_mod]["history"][item["title"
335 ]].append(tst_data["result"]["receive-rate"].
337 elif table["include-tests"] == "PDR":
338 if tst_data["type"] == "PDR":
339 tbl_dict[tst_name_mod]["history"][
341 append(tst_data["throughput"]["value"])
342 elif tst_data["type"] == "NDRPDR":
343 tbl_dict[tst_name_mod]["history"][item[
344 "title"]].append(tst_data["throughput"][
346 elif table["include-tests"] == "NDR":
347 if tst_data["type"] == "NDR":
348 tbl_dict[tst_name_mod]["history"][
350 append(tst_data["throughput"]["value"])
351 elif tst_data["type"] == "NDRPDR":
352 tbl_dict[tst_name_mod]["history"][item[
353 "title"]].append(tst_data["throughput"][
357 except (TypeError, KeyError):
# Build the output rows: for each test append (mean, stdev) pairs for
# every history column, then reference, then compare. Values are scaled
# from pps to Mpps (/ 1000000) and rounded to 2 decimals; missing data
# becomes a (None, None) pair so column positions stay fixed.
361 for tst_name in tbl_dict.keys():
362 item = [tbl_dict[tst_name]["name"], ]
364 if tbl_dict[tst_name].get("history", None) is not None:
365 for hist_data in tbl_dict[tst_name]["history"].values():
367 item.append(round(mean(hist_data) / 1000000, 2))
368 item.append(round(stdev(hist_data) / 1000000, 2))
370 item.extend([None, None])
372 item.extend([None, None])
373 data_t = tbl_dict[tst_name]["ref-data"]
375 item.append(round(mean(data_t) / 1000000, 2))
376 item.append(round(stdev(data_t) / 1000000, 2))
378 item.extend([None, None])
379 data_t = tbl_dict[tst_name]["cmp-data"]
381 item.append(round(mean(data_t) / 1000000, 2))
382 item.append(round(stdev(data_t) / 1000000, 2))
384 item.extend([None, None])
# item[-4] is the reference mean, item[-2] the compare mean; append the
# relative change [%] only when both exist and division is safe.
# Rows are kept only when fully populated (len matches the header).
385 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
386 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
387 if len(item) == len(header):
390 # Sort the table according to the relative change
391 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
393 # Generate csv tables:
394 csv_file = "{0}.csv".format(table["output-file"])
395 with open(csv_file, "w") as file_handler:
396 file_handler.write(header_str)
398 file_handler.write(",".join([str(item) for item in test]) + "\n")
# Also emit a human-readable aligned-text rendering of the same CSV.
400 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
403 def table_performance_trending_dashboard(table, input_data):
404 """Generate the table(s) with algorithm:
405 table_performance_trending_dashboard
406 specified in the specification file.
408 :param table: Table to generate.
409 :param input_data: Data to process.
410 :type table: pandas.Series
411 :type input_data: InputData
414 logging.info("  Generating the table {0} ...".
415 format(table.get("title", "")))
418 logging.info("    Creating the data set for the {0} '{1}'.".
419 format(table.get("type", ""), table.get("title", "")))
420 data = input_data.filter_data(table, continue_on_error=True)
422 # Prepare the header of the tables
423 header = ["Test Case",
425 "Short-Term Change [%]",
426 "Long-Term Change [%]",
430 header_str = ",".join(header) + "\n"
432 # Prepare data to the table:
# Collect the per-build MRR receive-rate samples for every test, keyed
# by build id, skipping tests listed in the spec's "ignore-list".
434 for job, builds in table["data"].items():
436 for tst_name, tst_data in data[job][str(build)].iteritems():
437 if tst_name.lower() in table["ignore-list"]:
439 if tbl_dict.get(tst_name, None) is None:
440 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
442 tbl_dict[tst_name] = {"name": name,
443 "data": OrderedDict()}
445 tbl_dict[tst_name]["data"][str(build)] = \
446 tst_data["result"]["receive-rate"]
447 except (TypeError, KeyError):
448 pass  # No data in output.xml for this test
451 for tst_name in tbl_dict.keys():
452 data_t = tbl_dict[tst_name]["data"]
# classify_anomalies labels each sample ("regression" / "progression" /
# "normal") and returns the trend averages used for the % changes below.
456 classification_lst, avgs = classify_anomalies(data_t)
# Two windows from the spec: "window" (short term) and
# "long-trend-window" (long term), both clipped to the available data.
458 win_size = min(len(data_t), table["window"])
459 long_win_size = min(len(data_t), table["long-trend-window"])
463 [x for x in avgs[-long_win_size:-win_size]
468 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Short-term change: last trend average vs. the average one window ago;
# NaN when either side is NaN or the denominator is zero.
470 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
471 rel_change_last = nan
473 rel_change_last = round(
474 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
# Long-term change: last trend average vs. the long-window maximum.
476 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
477 rel_change_long = nan
479 rel_change_long = round(
480 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
482 if classification_lst:
483 if isnan(rel_change_last) and isnan(rel_change_long):
# Row layout: name, last trend [Mpps], short-term %, long-term %,
# and the counts of regressions/progressions in the short window.
486 [tbl_dict[tst_name]["name"],
487 '-' if isnan(last_avg) else
488 round(last_avg / 1000000, 2),
489 '-' if isnan(rel_change_last) else rel_change_last,
490 '-' if isnan(rel_change_long) else rel_change_long,
491 classification_lst[-win_size:].count("regression"),
492 classification_lst[-win_size:].count("progression")])
494 tbl_lst.sort(key=lambda rel: rel[0])
# Final ordering: most regressions first (item[4] descending), then most
# progressions (item[5] descending), then by short-term change (item[2]).
497 for nrr in range(table["window"], -1, -1):
498 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
499 for nrp in range(table["window"], -1, -1):
500 tbl_out = [item for item in tbl_reg if item[5] == nrp]
501 tbl_out.sort(key=lambda rel: rel[2])
502 tbl_sorted.extend(tbl_out)
504 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
506 logging.info("    Writing file: '{0}'".format(file_name))
507 with open(file_name, "w") as file_handler:
508 file_handler.write(header_str)
509 for test in tbl_sorted:
510 file_handler.write(",".join([str(item) for item in test]) + '\n')
# Also emit a human-readable aligned-text rendering of the CSV.
512 txt_file_name = "{0}.txt".format(table["output-file"])
513 logging.info("    Writing file: '{0}'".format(txt_file_name))
514 convert_csv_to_pretty_txt(file_name, txt_file_name)
517 def _generate_url(base, test_name):
518 """Generate URL to a trending plot from the name of the test case.
520 :param base: The base part of URL common to all test cases.
521 :param test_name: The name of the test case.
524 :returns: The URL to the plot with the trending data for the given test
# The test-suite keyword in the name selects which trending page
# (file_name) the link points to; order matters, first match wins.
534 if "lbdpdk" in test_name or "lbvpp" in test_name:
535 file_name = "link_bonding.html"
537 elif "testpmd" in test_name or "l3fwd" in test_name:
538 file_name = "dpdk.html"
540 elif "memif" in test_name:
541 file_name = "container_memif.html"
543 elif "srv6" in test_name:
544 file_name = "srv6.html"
546 elif "vhost" in test_name:
547 if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
548 file_name = "vm_vhost_l2.html"
549 elif "ip4base" in test_name:
550 file_name = "vm_vhost_ip4.html"
552 elif "ipsec" in test_name:
553 file_name = "ipsec.html"
555 elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
556 file_name = "ip4_tunnels.html"
558 elif "ip4base" in test_name or "ip4scale" in test_name:
559 file_name = "ip4.html"
# Feature-specific tests land in the "-features" variant of the page.
560 if "iacl" in test_name or "snat" in test_name or "cop" in test_name:
561 feature = "-features"
563 elif "ip6base" in test_name or "ip6scale" in test_name:
564 file_name = "ip6.html"
566 elif "l2xcbase" in test_name or "l2xcscale" in test_name \
567 or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
568 or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
569 file_name = "l2.html"
570 if "iacl" in test_name:
571 feature = "-features"
# The anchor fragment is built up from the NIC model, the frame size and
# the thread/core combination found in the test name.
# NOTE(review): the anchor-building assignments themselves are not
# visible in this listing — confirm against the full source.
573 if "x520" in test_name:
575 elif "x710" in test_name:
577 elif "xl710" in test_name:
580 if "64b" in test_name:
582 elif "78b" in test_name:
584 elif "imix" in test_name:
586 elif "9000b" in test_name:
588 elif "1518" in test_name:
591 if "1t1c" in test_name:
593 elif "2t2c" in test_name:
595 elif "4t4c" in test_name:
# Final URL = base + page + anchor fragment + feature suffix.
598 return url + file_name + anchor + feature
601 def table_performance_trending_dashboard_html(table, input_data):
602 """Generate the table(s) with algorithm:
603 table_performance_trending_dashboard_html specified in the specification
606 :param table: Table to generate.
607 :param input_data: Data to process.
608 :type table: pandas.Series
609 :type input_data: InputData
612 logging.info("  Generating the table {0} ...".
613 format(table.get("title", "")))
# Read the previously generated dashboard CSV back in; all rows are
# materialised so the first row can be treated as the header.
# NOTE(review): 'rb' mode for csv.reader is Python 2 usage.
616 with open(table["input-file"], 'rb') as csv_file:
617 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
618 csv_lst = [item for item in csv_content]
620 logging.warning("The input file is not defined.")
622 except csv.Error as err:
623 logging.warning("Not possible to process the file '{0}'.\n{1}".
624 format(table["input-file"], err))
# Build the HTML table with ElementTree; borderless, full width.
628 dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row from csv_lst[0]: first column left-aligned, rest centered.
631 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
632 for idx, item in enumerate(csv_lst[0]):
633 alignment = "left" if idx == 0 else "center"
634 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Row colouring: a (even, odd) background pair per classification so
# regressions read red, progressions green, normal rows blue-grey.
638 colors = {"regression": ("#ffcccc", "#ff9999"),
639 "progression": ("#c6ecc6", "#9fdf9f"),
640 "normal": ("#e9f1fb", "#d4e4f7")}
641 for r_idx, row in enumerate(csv_lst[1:]):
645 color = "progression"
648 background = colors[color][r_idx % 2]
649 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
652 for c_idx, item in enumerate(row):
653 alignment = "left" if c_idx == 0 else "center"
654 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# The test-name cell links to its trending plot page.
657 url = _generate_url("../trending/", item)
658 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Emit as an rST ".. raw:: html" directive; the tab-indent keeps the
# markup inside the directive body.
663 with open(table["output-file"], 'w') as html_file:
664 logging.info("    Writing file: '{0}'".format(table["output-file"]))
665 html_file.write(".. raw:: html\n\n\t")
666 html_file.write(ET.tostring(dashboard))
667 html_file.write("\n\t<p><br><br></p>\n")
669 logging.warning("The output file is not defined.")
673 def table_failed_tests(table, input_data):
674 """Generate the table(s) with algorithm: table_failed_tests
675 specified in the specification file.
677 :param table: Table to generate.
678 :param input_data: Data to process.
679 :type table: pandas.Series
680 :type input_data: InputData
683 logging.info("  Generating the table {0} ...".
684 format(table.get("title", "")))
687 logging.info("    Creating the data set for the {0} '{1}'.".
688 format(table.get("type", ""), table.get("title", "")))
689 data = input_data.filter_data(table, continue_on_error=True)
691 # Prepare the header of the tables
692 header = ["Test Case",
694 "Last Failure [Time]",
695 "Last Failure [VPP-Build-Id]",
696 "Last Failure [CSIT-Job-Build-Id]"]
698 # Generate the data for the table according to the model in the table
# Collect per-build result tuples for every test (skipping tests on the
# spec's "ignore-list"): status plus the build's generation time, VPP
# version and CSIT build id taken from the job metadata.
701 for job, builds in table["data"].items():
704 for tst_name, tst_data in data[job][build].iteritems():
705 if tst_name.lower() in table["ignore-list"]:
707 if tbl_dict.get(tst_name, None) is None:
708 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
710 tbl_dict[tst_name] = {"name": name,
711 "data": OrderedDict()}
713 tbl_dict[tst_name]["data"][build] = (
715 input_data.metadata(job, build).get("generated", ""),
716 input_data.metadata(job, build).get("version", ""),
718 except (TypeError, KeyError):
719 pass  # No data in output.xml for this test
# Count failures inside the most recent "window" builds and remember the
# details of the last failure seen.
# NOTE(review): dict.values()[-win_size:] is Python 2 only.
722 for tst_data in tbl_dict.values():
723 win_size = min(len(tst_data["data"]), table["window"])
725 for val in tst_data["data"].values()[-win_size:]:
728 fails_last_date = val[1]
729 fails_last_vpp = val[2]
730 fails_last_csit = val[3]
732 tbl_lst.append([tst_data["name"],
736 "mrr-daily-build-{0}".format(fails_last_csit)])
# Sort by last-failure time descending, then bucket by failure count so
# the most frequently failing tests come first.
738 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
740 for nrf in range(table["window"], -1, -1):
741 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
742 tbl_sorted.extend(tbl_fails)
743 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
745 logging.info("    Writing file: '{0}'".format(file_name))
746 with open(file_name, "w") as file_handler:
747 file_handler.write(",".join(header) + "\n")
748 for test in tbl_sorted:
749 file_handler.write(",".join([str(item) for item in test]) + '\n')
# Also emit a human-readable aligned-text rendering of the CSV.
751 txt_file_name = "{0}.txt".format(table["output-file"])
752 logging.info("    Writing file: '{0}'".format(txt_file_name))
753 convert_csv_to_pretty_txt(file_name, txt_file_name)
756 def table_failed_tests_html(table, input_data):
757 """Generate the table(s) with algorithm: table_failed_tests_html
758 specified in the specification file.
760 :param table: Table to generate.
761 :param input_data: Data to process.
762 :type table: pandas.Series
763 :type input_data: InputData
766 logging.info("  Generating the table {0} ...".
767 format(table.get("title", "")))
# Read the failed-tests CSV produced by table_failed_tests; all rows are
# materialised so the first row can serve as the header.
# NOTE(review): 'rb' mode for csv.reader is Python 2 usage.
770 with open(table["input-file"], 'rb') as csv_file:
771 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
772 csv_lst = [item for item in csv_content]
774 logging.warning("The input file is not defined.")
776 except csv.Error as err:
777 logging.warning("Not possible to process the file '{0}'.\n{1}".
778 format(table["input-file"], err))
# Build the HTML table with ElementTree; borderless, full width.
782 failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row from csv_lst[0]: first column left-aligned, rest centered.
785 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
786 for idx, item in enumerate(csv_lst[0]):
787 alignment = "left" if idx == 0 else "center"
788 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Plain alternating row colours (no per-classification colouring here,
# unlike the trending dashboard HTML table).
792 colors = ("#e9f1fb", "#d4e4f7")
793 for r_idx, row in enumerate(csv_lst[1:]):
794 background = colors[r_idx % 2]
795 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
798 for c_idx, item in enumerate(row):
799 alignment = "left" if c_idx == 0 else "center"
800 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# The test-name cell links to its trending plot page.
803 url = _generate_url("../trending/", item)
804 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Emit as an rST ".. raw:: html" directive; the tab-indent keeps the
# markup inside the directive body.
809 with open(table["output-file"], 'w') as html_file:
810 logging.info("    Writing file: '{0}'".format(table["output-file"]))
811 html_file.write(".. raw:: html\n\n\t")
812 html_file.write(ET.tostring(failed_tests))
813 html_file.write("\n\t<p><br><br></p>\n")
815 logging.warning("The output file is not defined.")