1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
21 from string import replace
22 from collections import OrderedDict
23 from numpy import nan, isnan
24 from xml.etree import ElementTree as ET
26 from errors import PresentationError
27 from utils import mean, stdev, relative_change, classify_anomalies, \
28 convert_csv_to_pretty_txt
# Entry point: dispatch every table defined in the specification to its
# generator function in this module.
# NOTE(review): this is a line-numbered paste with gaps (e.g. the `try:` on
# original line 42 is missing), so only comments are added here.
31 def generate_tables(spec, data):
32 """Generate all tables specified in the specification file.
34 :param spec: Specification read from the specification file.
35 :param data: Data to process.
36 :type spec: Specification
40 logging.info("Generating the tables ...")
41 for table in spec.tables:
# eval() resolves the algorithm name from the spec to a function in this
# module and calls it.  HACK: works only for trusted specification files;
# getattr/globals() lookup would be safer than eval.
43 eval(table["algorithm"])(table, data)
# A misspelled/unknown algorithm name surfaces as NameError from eval;
# it is logged and the remaining tables are still generated.
44 except NameError as err:
45 logging.error("Probably algorithm '{alg}' is not defined: {err}".
46 format(alg=table["algorithm"], err=repr(err)))
# Write one CSV file of detailed test results per suite.
# NOTE(review): numbered paste with gaps -- several original lines (docstring
# closer, `try:` bodies, header/table list initializations) are missing.
50 def table_details(table, input_data):
51 """Generate the table(s) with algorithm: table_detailed_test_results
52 specified in the specification file.
54 :param table: Table to generate.
55 :param input_data: Data to process.
56 :type table: pandas.Series
57 :type input_data: InputData
60 logging.info("  Generating the table {0} ...".
61 format(table.get("title", "")))
64 logging.info("    Creating the data set for the {0} '{1}'.".
65 format(table.get("type", ""), table.get("title", "")))
66 data = input_data.filter_data(table)
68 # Prepare the header of the tables
# Column titles are CSV-quoted; embedded double quotes are doubled per RFC 4180.
70 for column in table["columns"]:
71 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
73 # Generate the data for the table according to the model in the table
# Python 2 idiom: dict.keys()[0] -- only the first job/build pair is used.
75 job = table["data"].keys()[0]
76 build = str(table["data"][job][0])
78 suites = input_data.suites(job, build)
80 logging.error("  No data available. The table will not be generated.")
# Python 2: iteritems(); one output file is produced per suite below.
83 for suite_longname, suite in suites.iteritems():
85 suite_name = suite["name"]
87 for test in data[job][build].keys():
88 if data[job][build][test]["parent"] in suite_name:
90 for column in table["columns"]:
# column["data"] is a space-separated selector; token [1] is the
# key looked up in the test record.
92 col_data = str(data[job][build][test][column["data"].
93 split(" ")[1]]).replace('"', '""')
94 if column["data"].split(" ")[1] in ("vat-history",
# `replace` here is the Python 2-only string.replace() imported at module top.
96 col_data = replace(col_data, " |br| ", "",
# |prein|/|preout| are rst/Sphinx substitution markers used by the report build.
98 col_data = " |prein| {0} |preout| ".\
100 row_lst.append('"{0}"'.format(col_data))
102 row_lst.append("No data")
103 table_lst.append(row_lst)
105 # Write the data to file
107 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
108 table["output-file-ext"])
109 logging.info("      Writing file: '{}'".format(file_name))
110 with open(file_name, "w") as file_handler:
111 file_handler.write(",".join(header) + "\n")
112 for item in table_lst:
113 file_handler.write(",".join(item) + "\n")
115 logging.info("  Done.")
# Same as table_details, but first merges data across jobs/builds so each
# suite's CSV reflects the merged data set.
# NOTE(review): numbered paste with gaps; missing lines are not reconstructed.
118 def table_merged_details(table, input_data):
119 """Generate the table(s) with algorithm: table_merged_details
120 specified in the specification file.
122 :param table: Table to generate.
123 :param input_data: Data to process.
124 :type table: pandas.Series
125 :type input_data: InputData
128 logging.info("  Generating the table {0} ...".
129 format(table.get("title", "")))
132 logging.info("    Creating the data set for the {0} '{1}'.".
133 format(table.get("type", ""), table.get("title", "")))
134 data = input_data.filter_data(table)
135 data = input_data.merge_data(data)
# merge_data appears to return a pandas object -- sorted in place here.
136 data.sort_index(inplace=True)
138 logging.info("    Creating the data set for the {0} '{1}'.".
139 format(table.get("type", ""), table.get("title", "")))
140 suites = input_data.filter_data(table, data_set="suites")
141 suites = input_data.merge_data(suites)
143 # Prepare the header of the tables
# CSV quoting: titles wrapped in quotes, embedded quotes doubled.
145 for column in table["columns"]:
146 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
# Python 2 iteritems(); one output file per suite.
148 for _, suite in suites.iteritems():
150 suite_name = suite["name"]
152 for test in data.keys():
153 if data[test]["parent"] in suite_name:
155 for column in table["columns"]:
157 col_data = str(data[test][column["data"].
158 split(" ")[1]]).replace('"', '""')
159 if column["data"].split(" ")[1] in ("vat-history",
# Python 2-only string.replace() from the module-level import.
161 col_data = replace(col_data, " |br| ", "",
# [:-5] strips a trailing fragment (presumably a final " |br| ") -- confirm.
163 col_data = " |prein| {0} |preout| ".\
164 format(col_data[:-5])
165 row_lst.append('"{0}"'.format(col_data))
167 row_lst.append("No data")
168 table_lst.append(row_lst)
170 # Write the data to file
172 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
173 table["output-file-ext"])
174 logging.info("      Writing file: '{}'".format(file_name))
175 with open(file_name, "w") as file_handler:
176 file_handler.write(",".join(header) + "\n")
177 for item in table_lst:
178 file_handler.write(",".join(item) + "\n")
180 logging.info("  Done.")
# Build a CSV comparing "reference" vs "compare" performance results
# (optionally with extra "history" columns), plus a pretty-printed txt copy.
# NOTE(review): numbered paste with gaps (try/except lines, initializations of
# tbl_dict/tbl_lst, several branch bodies are missing); comments only.
183 def table_performance_comparison(table, input_data):
184 """Generate the table(s) with algorithm: table_performance_comparison
185 specified in the specification file.
187 :param table: Table to generate.
188 :param input_data: Data to process.
189 :type table: pandas.Series
190 :type input_data: InputData
193 logging.info("  Generating the table {0} ...".
194 format(table.get("title", "")))
197 logging.info("    Creating the data set for the {0} '{1}'.".
198 format(table.get("type", ""), table.get("title", "")))
199 data = input_data.filter_data(table, continue_on_error=True)
201 # Prepare the header of the tables
203 header = ["Test case", ]
# Column label depends on test kind: MRR reports receive rate, NDR/PDR throughput.
205 if table["include-tests"] == "MRR":
206 hdr_param = "Receive Rate"
208 hdr_param = "Throughput"
210 history = table.get("history", None)
# For each optional history entry: a mean column and a stdev column.
214 ["{0} {1} [Mpps]".format(item["title"], hdr_param),
215 "{0} Stdev [Mpps]".format(item["title"])])
217 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
218 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
219 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
220 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
222 header_str = ",".join(header) + "\n"
223 except (AttributeError, KeyError) as err:
224 logging.error("The model is invalid, missing parameter: {0}".
228 # Prepare data to the table:
# Pass 1: collect "reference" samples keyed by a normalized test name.
230 for job, builds in table["reference"]["data"].items():
232 for tst_name, tst_data in data[job][str(build)].iteritems():
# Normalize the test name: strip the test-type suffix and collapse
# thread/core tags (e.g. 2t1c -> 1c) so runs with different tagging merge.
233 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
234 replace("-ndrpdr", "").replace("-pdrdisc", "").\
235 replace("-ndrdisc", "").replace("-pdr", "").\
236 replace("-ndr", "").\
237 replace("1t1c", "1c").replace("2t1c", "1c").\
238 replace("2t2c", "2c").replace("4t2c", "2c").\
239 replace("4t4c", "4c").replace("8t4c", "4c")
240 if tbl_dict.get(tst_name_mod, None) is None:
241 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
242 "-".join(tst_data["name"].
244 tbl_dict[tst_name_mod] = {"name": name,
248 # TODO: Re-work when NDRPDRDISC tests are not used
249 if table["include-tests"] == "MRR":
250 tbl_dict[tst_name_mod]["ref-data"]. \
251 append(tst_data["result"]["receive-rate"].avg)
252 elif table["include-tests"] == "PDR":
253 if tst_data["type"] == "PDR":
254 tbl_dict[tst_name_mod]["ref-data"]. \
255 append(tst_data["throughput"]["value"])
256 elif tst_data["type"] == "NDRPDR":
257 tbl_dict[tst_name_mod]["ref-data"].append(
258 tst_data["throughput"]["PDR"]["LOWER"])
259 elif table["include-tests"] == "NDR":
260 if tst_data["type"] == "NDR":
261 tbl_dict[tst_name_mod]["ref-data"]. \
262 append(tst_data["throughput"]["value"])
263 elif tst_data["type"] == "NDRPDR":
264 tbl_dict[tst_name_mod]["ref-data"].append(
265 tst_data["throughput"]["NDR"]["LOWER"])
269 pass  # No data in output.xml for this test
# Pass 2: collect "compare" samples into the same per-test buckets.
271 for job, builds in table["compare"]["data"].items():
273 for tst_name, tst_data in data[job][str(build)].iteritems():
274 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
275 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
276 replace("-ndrdisc", "").replace("-pdr", ""). \
277 replace("-ndr", "").\
278 replace("1t1c", "1c").replace("2t1c", "1c").\
279 replace("2t2c", "2c").replace("4t2c", "2c").\
280 replace("4t4c", "4c").replace("8t4c", "4c")
282 # TODO: Re-work when NDRPDRDISC tests are not used
283 if table["include-tests"] == "MRR":
284 tbl_dict[tst_name_mod]["cmp-data"]. \
285 append(tst_data["result"]["receive-rate"].avg)
286 elif table["include-tests"] == "PDR":
287 if tst_data["type"] == "PDR":
288 tbl_dict[tst_name_mod]["cmp-data"]. \
289 append(tst_data["throughput"]["value"])
290 elif tst_data["type"] == "NDRPDR":
291 tbl_dict[tst_name_mod]["cmp-data"].append(
292 tst_data["throughput"]["PDR"]["LOWER"])
293 elif table["include-tests"] == "NDR":
294 if tst_data["type"] == "NDR":
295 tbl_dict[tst_name_mod]["cmp-data"]. \
296 append(tst_data["throughput"]["value"])
297 elif tst_data["type"] == "NDRPDR":
298 tbl_dict[tst_name_mod]["cmp-data"].append(
299 tst_data["throughput"]["NDR"]["LOWER"])
# Tests with no usable compare data are dropped from the table.
305 tbl_dict.pop(tst_name_mod, None)
# Pass 3 (optional): collect historical samples per history item title.
308 for job, builds in item["data"].items():
310 for tst_name, tst_data in data[job][str(build)].iteritems():
311 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
312 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
313 replace("-ndrdisc", "").replace("-pdr", ""). \
314 replace("-ndr", "").\
315 replace("1t1c", "1c").replace("2t1c", "1c").\
316 replace("2t2c", "2c").replace("4t2c", "2c").\
317 replace("4t4c", "4c").replace("8t4c", "4c")
318 if tbl_dict.get(tst_name_mod, None) is None:
320 if tbl_dict[tst_name_mod].get("history", None) is None:
321 tbl_dict[tst_name_mod]["history"] = OrderedDict()
322 if tbl_dict[tst_name_mod]["history"].get(item["title"],
324 tbl_dict[tst_name_mod]["history"][item["title"]] = \
327 # TODO: Re-work when NDRPDRDISC tests are not used
328 if table["include-tests"] == "MRR":
329 tbl_dict[tst_name_mod]["history"][item["title"
330 ]].append(tst_data["result"]["receive-rate"].
332 elif table["include-tests"] == "PDR":
333 if tst_data["type"] == "PDR":
334 tbl_dict[tst_name_mod]["history"][
336 append(tst_data["throughput"]["value"])
337 elif tst_data["type"] == "NDRPDR":
338 tbl_dict[tst_name_mod]["history"][item[
339 "title"]].append(tst_data["throughput"][
341 elif table["include-tests"] == "NDR":
342 if tst_data["type"] == "NDR":
343 tbl_dict[tst_name_mod]["history"][
345 append(tst_data["throughput"]["value"])
346 elif tst_data["type"] == "NDRPDR":
347 tbl_dict[tst_name_mod]["history"][item[
348 "title"]].append(tst_data["throughput"][
352 except (TypeError, KeyError):
# Build the output rows: mean/stdev pairs scaled to Mpps (/ 1e6), with
# None placeholders where a sample set is empty.
356 for tst_name in tbl_dict.keys():
357 item = [tbl_dict[tst_name]["name"], ]
359 if tbl_dict[tst_name].get("history", None) is not None:
360 for hist_data in tbl_dict[tst_name]["history"].values():
362 item.append(round(mean(hist_data) / 1000000, 2))
363 item.append(round(stdev(hist_data) / 1000000, 2))
365 item.extend([None, None])
367 item.extend([None, None])
368 data_t = tbl_dict[tst_name]["ref-data"]
370 item.append(round(mean(data_t) / 1000000, 2))
371 item.append(round(stdev(data_t) / 1000000, 2))
373 item.extend([None, None])
374 data_t = tbl_dict[tst_name]["cmp-data"]
376 item.append(round(mean(data_t) / 1000000, 2))
377 item.append(round(stdev(data_t) / 1000000, 2))
379 item.extend([None, None])
# item[-4] is the reference mean, item[-2] the compare mean; the final
# column is the integer relative change between them (skips zero/None ref).
380 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
381 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
# Only complete rows (all header columns filled) make it into the table.
382 if len(item) == len(header):
385 # Sort the table according to the relative change
386 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
388 # Generate csv tables:
389 csv_file = "{0}.csv".format(table["output-file"])
390 with open(csv_file, "w") as file_handler:
391 file_handler.write(header_str)
393 file_handler.write(",".join([str(item) for item in test]) + "\n")
395 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
# Build the trending dashboard CSV: per test, the latest trend value and
# short-/long-term relative changes plus regression/progression counts.
# NOTE(review): numbered paste with gaps (initializations, some branch bodies
# and `last_avg`/`max_long_avg` assignments are missing); comments only.
398 def table_performance_trending_dashboard(table, input_data):
399 """Generate the table(s) with algorithm:
400 table_performance_trending_dashboard
401 specified in the specification file.
403 :param table: Table to generate.
404 :param input_data: Data to process.
405 :type table: pandas.Series
406 :type input_data: InputData
409 logging.info("  Generating the table {0} ...".
410 format(table.get("title", "")))
413 logging.info("    Creating the data set for the {0} '{1}'.".
414 format(table.get("type", ""), table.get("title", "")))
415 data = input_data.filter_data(table, continue_on_error=True)
417 # Prepare the header of the tables
418 header = ["Test Case",
420 "Short-Term Change [%]",
421 "Long-Term Change [%]",
425 header_str = ",".join(header) + "\n"
427 # Prepare data to the table:
# Collect per-test receive-rate samples keyed by build, in build order.
429 for job, builds in table["data"].items():
431 for tst_name, tst_data in data[job][str(build)].iteritems():
432 if tst_name.lower() in table["ignore-list"]:
434 if tbl_dict.get(tst_name, None) is None:
435 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
437 tbl_dict[tst_name] = {"name": name,
438 "data": OrderedDict()}
440 tbl_dict[tst_name]["data"][str(build)] = \
441 tst_data["result"]["receive-rate"]
442 except (TypeError, KeyError):
443 pass  # No data in output.xml for this test
446 for tst_name in tbl_dict.keys():
447 data_t = tbl_dict[tst_name]["data"]
# classify_anomalies labels each sample ("regression"/"progression"/
# "normal") and returns the trend averages alongside.
451 classification_lst, avgs = classify_anomalies(data_t)
# Short and long windows are capped by the amount of available data.
453 win_size = min(len(data_t), table["window"])
454 long_win_size = min(len(data_t), table["long-trend-window"])
458 [x for x in avgs[-long_win_size:-win_size]
# Oldest average inside the short-term window ("a week ago").
463 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Guard against NaN trend values and division by zero.
465 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
466 rel_change_last = nan
468 rel_change_last = round(
469 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
471 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
472 rel_change_long = nan
474 rel_change_long = round(
475 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
477 if classification_lst:
# Rows where both relative changes are NaN are presumably skipped.
478 if isnan(rel_change_last) and isnan(rel_change_long):
# Row layout: name, last trend [Mpps], short-term %, long-term %,
# regression count, progression count ('-' marks missing values).
481 [tbl_dict[tst_name]["name"],
482 '-' if isnan(last_avg) else
483 round(last_avg / 1000000, 2),
484 '-' if isnan(rel_change_last) else rel_change_last,
485 '-' if isnan(rel_change_long) else rel_change_long,
486 classification_lst[-win_size:].count("regression"),
487 classification_lst[-win_size:].count("progression")])
489 tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-key ordering: most regressions first, then most
# progressions, then ascending short-term change.
492 for nrr in range(table["window"], -1, -1):
493 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
494 for nrp in range(table["window"], -1, -1):
495 tbl_out = [item for item in tbl_reg if item[5] == nrp]
496 tbl_out.sort(key=lambda rel: rel[2])
497 tbl_sorted.extend(tbl_out)
499 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
501 logging.info("    Writing file: '{0}'".format(file_name))
502 with open(file_name, "w") as file_handler:
503 file_handler.write(header_str)
504 for test in tbl_sorted:
505 file_handler.write(",".join([str(item) for item in test]) + '\n')
507 txt_file_name = "{0}.txt".format(table["output-file"])
508 logging.info("    Writing file: '{0}'".format(txt_file_name))
509 convert_csv_to_pretty_txt(file_name, txt_file_name)
512 def _generate_url(base, test_name):
513 """Generate URL to a trending plot from the name of the test case.
515 :param base: The base part of URL common to all test cases.
516 :param test_name: The name of the test case.
519 :returns: The URL to the plot with the trending data for the given test
529 if "lbdpdk" in test_name or "lbvpp" in test_name:
530 file_name = "link_bonding.html"
532 elif "testpmd" in test_name or "l3fwd" in test_name:
533 file_name = "dpdk.html"
535 elif "memif" in test_name:
536 file_name = "container_memif.html"
538 elif "srv6" in test_name:
539 file_name = "srv6.html"
541 elif "vhost" in test_name:
542 if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
543 file_name = "vm_vhost_l2.html"
544 elif "ip4base" in test_name:
545 file_name = "vm_vhost_ip4.html"
547 elif "ipsec" in test_name:
548 file_name = "ipsec.html"
550 elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
551 file_name = "ip4_tunnels.html"
553 elif "ip4base" in test_name or "ip4scale" in test_name:
554 file_name = "ip4.html"
555 if "iacl" in test_name or "snat" in test_name or "cop" in test_name:
556 feature = "-features"
558 elif "ip6base" in test_name or "ip6scale" in test_name:
559 file_name = "ip6.html"
561 elif "l2xcbase" in test_name or "l2xcscale" in test_name \
562 or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
563 or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
564 file_name = "l2.html"
565 if "iacl" in test_name:
566 feature = "-features"
568 if "x520" in test_name:
570 elif "x710" in test_name:
572 elif "xl710" in test_name:
575 if "64b" in test_name:
577 elif "78b" in test_name:
579 elif "imix" in test_name:
581 elif "9000b" in test_name:
583 elif "1518" in test_name:
586 if "1t1c" in test_name:
588 elif "2t2c" in test_name:
590 elif "4t4c" in test_name:
593 return url + file_name + anchor + feature
# Convert the trending-dashboard CSV into a colour-coded HTML table embedded
# in an rst ".. raw:: html" block.
# NOTE(review): numbered paste with gaps (try/except framing, th/td text
# assignments, the regression branch of the colour choice are missing).
596 def table_performance_trending_dashboard_html(table, input_data):
597 """Generate the table(s) with algorithm:
598 table_performance_trending_dashboard_html specified in the specification
601 :param table: Table to generate.
602 :param input_data: Data to process.
603 :type table: pandas.Series
604 :type input_data: InputData
607 logging.info("  Generating the table {0} ...".
608 format(table.get("title", "")))
# 'rb' mode for csv.reader is the Python 2 convention.
611 with open(table["input-file"], 'rb') as csv_file:
612 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
613 csv_lst = [item for item in csv_content]
615 logging.warning("The input file is not defined.")
617 except csv.Error as err:
618 logging.warning("Not possible to process the file '{0}'.\n{1}".
619 format(table["input-file"], err))
# Root element of the generated HTML table.
623 dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row: first CSV row becomes <th> cells; first column left-aligned.
626 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
627 for idx, item in enumerate(csv_lst[0]):
628 alignment = "left" if idx == 0 else "center"
629 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Two alternating shades per classification for zebra striping.
633 colors = {"regression": ("#ffcccc", "#ff9999"),
634 "progression": ("#c6ecc6", "#9fdf9f"),
635 "normal": ("#e9f1fb", "#d4e4f7")}
636 for r_idx, row in enumerate(csv_lst[1:]):
# Row colour is picked from the (elided) regression/progression counts.
640 color = "progression"
643 background = colors[color][r_idx % 2]
644 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
647 for c_idx, item in enumerate(row):
648 alignment = "left" if c_idx == 0 else "center"
649 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# The test-name cell is wrapped in a link to its trending plot.
652 url = _generate_url("../trending/", item)
653 ref = ET.SubElement(td, "a", attrib=dict(href=url))
658 with open(table["output-file"], 'w') as html_file:
659 logging.info("    Writing file: '{0}'".format(table["output-file"]))
660 html_file.write(".. raw:: html\n\n\t")
661 html_file.write(ET.tostring(dashboard))
662 html_file.write("\n\t<p><br><br></p>\n")
664 logging.warning("The output file is not defined.")
# Build a CSV of failing tests: per test, failure count inside the window and
# the time/VPP build/CSIT build of the last failure; plus a txt copy.
# NOTE(review): numbered paste with gaps (initializations, the fail-counting
# condition and parts of the appended row are missing); comments only.
668 def table_failed_tests(table, input_data):
669 """Generate the table(s) with algorithm: table_failed_tests
670 specified in the specification file.
672 :param table: Table to generate.
673 :param input_data: Data to process.
674 :type table: pandas.Series
675 :type input_data: InputData
678 logging.info("  Generating the table {0} ...".
679 format(table.get("title", "")))
682 logging.info("    Creating the data set for the {0} '{1}'.".
683 format(table.get("type", ""), table.get("title", "")))
684 data = input_data.filter_data(table, continue_on_error=True)
686 # Prepare the header of the tables
687 header = ["Test Case",
689 "Last Failure [Time]",
690 "Last Failure [VPP-Build-Id]",
691 "Last Failure [CSIT-Job-Build-Id]"]
693 # Generate the data for the table according to the model in the table
# Collect per-test, per-build tuples (status?, generated time, VPP version,
# build id) -- the first tuple element is on an elided line; verify.
696 for job, builds in table["data"].items():
699 for tst_name, tst_data in data[job][build].iteritems():
700 if tst_name.lower() in table["ignore-list"]:
702 if tbl_dict.get(tst_name, None) is None:
703 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
705 tbl_dict[tst_name] = {"name": name,
706 "data": OrderedDict()}
708 tbl_dict[tst_name]["data"][build] = (
710 input_data.metadata(job, build).get("generated", ""),
711 input_data.metadata(job, build).get("version", ""),
713 except (TypeError, KeyError):
714 pass  # No data in output.xml for this test
717 for tst_data in tbl_dict.values():
# Only the last `window` builds are considered for the failure count.
718 win_size = min(len(tst_data["data"]), table["window"])
# Python 2: OrderedDict.values() returns a list, so slicing works directly.
720 for val in tst_data["data"].values()[-win_size:]:
# Remember the metadata of the most recent failure seen.
723 fails_last_date = val[1]
724 fails_last_vpp = val[2]
725 fails_last_csit = val[3]
727 tbl_lst.append([tst_data["name"],
731 "mrr-daily-build-{0}".format(fails_last_csit)])
# Pre-sort by last-failure date, then bucket by failure count descending.
733 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
735 for nrf in range(table["window"], -1, -1):
736 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
737 tbl_sorted.extend(tbl_fails)
738 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
740 logging.info("    Writing file: '{0}'".format(file_name))
741 with open(file_name, "w") as file_handler:
742 file_handler.write(",".join(header) + "\n")
743 for test in tbl_sorted:
744 file_handler.write(",".join([str(item) for item in test]) + '\n')
746 txt_file_name = "{0}.txt".format(table["output-file"])
747 logging.info("    Writing file: '{0}'".format(txt_file_name))
748 convert_csv_to_pretty_txt(file_name, txt_file_name)
# Convert the failed-tests CSV into a zebra-striped HTML table embedded in an
# rst ".. raw:: html" block; mirrors table_performance_trending_dashboard_html
# but without per-classification colours.
# NOTE(review): numbered paste with gaps (try/except framing and cell-text
# assignments are missing); comments only.
751 def table_failed_tests_html(table, input_data):
752 """Generate the table(s) with algorithm: table_failed_tests_html
753 specified in the specification file.
755 :param table: Table to generate.
756 :param input_data: Data to process.
757 :type table: pandas.Series
758 :type input_data: InputData
761 logging.info("  Generating the table {0} ...".
762 format(table.get("title", "")))
# 'rb' mode for csv.reader is the Python 2 convention.
765 with open(table["input-file"], 'rb') as csv_file:
766 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
767 csv_lst = [item for item in csv_content]
769 logging.warning("The input file is not defined.")
771 except csv.Error as err:
772 logging.warning("Not possible to process the file '{0}'.\n{1}".
773 format(table["input-file"], err))
777 failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row from the first CSV row; first column left-aligned.
780 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
781 for idx, item in enumerate(csv_lst[0]):
782 alignment = "left" if idx == 0 else "center"
783 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Two alternating background shades for zebra striping.
787 colors = ("#e9f1fb", "#d4e4f7")
788 for r_idx, row in enumerate(csv_lst[1:]):
789 background = colors[r_idx % 2]
790 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
793 for c_idx, item in enumerate(row):
794 alignment = "left" if c_idx == 0 else "center"
795 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# The test-name cell links to its trending plot.
798 url = _generate_url("../trending/", item)
799 ref = ET.SubElement(td, "a", attrib=dict(href=url))
804 with open(table["output-file"], 'w') as html_file:
805 logging.info("    Writing file: '{0}'".format(table["output-file"]))
806 html_file.write(".. raw:: html\n\n\t")
807 html_file.write(ET.tostring(failed_tests))
808 html_file.write("\n\t<p><br><br></p>\n")
810 logging.warning("The output file is not defined.")