1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
21 from string import replace
22 from collections import OrderedDict
23 from numpy import nan, isnan
24 from xml.etree import ElementTree as ET
26 from errors import PresentationError
27 from utils import mean, stdev, relative_change, classify_anomalies, \
28 convert_csv_to_pretty_txt
# Entry point: dispatch every table defined in the specification to its
# generator function by name.
# NOTE(review): this is a partial, line-numbered extract; original lines are
# missing (e.g. the "try:" matched by the "except" below was on line 42), so
# the code is annotated in place rather than rewritten.
31 def generate_tables(spec, data):
32 """Generate all tables specified in the specification file.
34 :param spec: Specification read from the specification file.
35 :param data: Data to process.
36 :type spec: Specification
40 logging.info("Generating the tables ...")
41 for table in spec.tables:
# NOTE(review): eval() of a spec-supplied name executes arbitrary code if the
# specification file is ever untrusted; a whitelist dict mapping algorithm
# names to the generator functions in this module would be safer.
43 eval(table["algorithm"])(table, data)
# Only NameError is caught — an unknown algorithm is logged and skipped so
# the remaining tables are still generated.
44 except NameError as err:
45 logging.error("Probably algorithm '{alg}' is not defined: {err}".
46 format(alg=table["algorithm"], err=repr(err)))
# Writes one CSV file per suite with per-test detail columns taken from a
# single (job, build) pair named in the table specification.
# NOTE(review): partial line-numbered extract — some original lines (header
# list init, try/except scaffolding, continuation arguments) are missing.
50 def table_details(table, input_data):
51 """Generate the table(s) with algorithm: table_detailed_test_results
52 specified in the specification file.
54 :param table: Table to generate.
55 :param input_data: Data to process.
56 :type table: pandas.Series
57 :type input_data: InputData
60 logging.info(" Generating the table {0} ...".
61 format(table.get("title", "")))
64 logging.info(" Creating the data set for the {0} '{1}'.".
65 format(table.get("type", ""), table.get("title", "")))
66 data = input_data.filter_data(table)
68 # Prepare the header of the tables
# Column titles are CSV-quoted; embedded double quotes are escaped by doubling.
70 for column in table["columns"]:
71 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
73 # Generate the data for the table according to the model in the table
# Python 2: dict.keys() returns a list, so [0] picks the (single expected)
# job; only the first build of that job is used.
75 job = table["data"].keys()[0]
76 build = str(table["data"][job][0])
78 suites = input_data.suites(job, build)
80 logging.error(" No data available. The table will not be generated.")
83 for suite_longname, suite in suites.iteritems():
85 suite_name = suite["name"]
87 for test in data[job][build].keys():
# Tests are matched to their suite via substring containment of "parent".
88 if data[job][build][test]["parent"] in suite_name:
90 for column in table["columns"]:
# column["data"] looks like "<source> <key>"; the second token selects the
# field of the test record — TODO confirm against the specification format.
92 col_data = str(data[job][build][test][column["data"].
93 split(" ")[1]]).replace('"', '""')
94 if column["data"].split(" ")[1] in ("vat-history",
# string.replace (Python 2 only) strips the " |br| " rST markers and the
# value is wrapped in |prein|/|preout| for preformatted rendering.
96 col_data = replace(col_data, " |br| ", "",
98 col_data = " |prein| {0} |preout| ".\
100 row_lst.append('"{0}"'.format(col_data))
# Missing field for this test -> placeholder cell (presumably via KeyError
# handling on the lines not visible here).
102 row_lst.append("No data")
103 table_lst.append(row_lst)
105 # Write the data to file
# One output file per suite: <output-file>_<suite><ext>.
107 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
108 table["output-file-ext"])
109 logging.info(" Writing file: '{}'".format(file_name))
110 with open(file_name, "w") as file_handler:
111 file_handler.write(",".join(header) + "\n")
112 for item in table_lst:
113 file_handler.write(",".join(item) + "\n")
115 logging.info(" Done.")
# Same CSV-per-suite output as table_details, but over data merged across
# all jobs/builds instead of a single (job, build) pair.
# NOTE(review): partial line-numbered extract — some original lines are
# missing; annotated in place rather than rewritten. The body intentionally
# mirrors table_details; keep the two in sync when either changes.
118 def table_merged_details(table, input_data):
119 """Generate the table(s) with algorithm: table_merged_details
120 specified in the specification file.
122 :param table: Table to generate.
123 :param input_data: Data to process.
124 :type table: pandas.Series
125 :type input_data: InputData
128 logging.info(" Generating the table {0} ...".
129 format(table.get("title", "")))
132 logging.info(" Creating the data set for the {0} '{1}'.".
133 format(table.get("type", ""), table.get("title", "")))
134 data = input_data.filter_data(table)
# Merge collapses the per-job/per-build nesting into one flat test mapping.
135 data = input_data.merge_data(data)
136 data.sort_index(inplace=True)
138 logging.info(" Creating the data set for the {0} '{1}'.".
139 format(table.get("type", ""), table.get("title", "")))
140 suites = input_data.filter_data(table, data_set="suites")
141 suites = input_data.merge_data(suites)
143 # Prepare the header of the tables
# CSV-quote column titles; double any embedded double quotes.
145 for column in table["columns"]:
146 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
148 for _, suite in suites.iteritems():
150 suite_name = suite["name"]
152 for test in data.keys():
# Substring match assigns each test to its suite via its "parent" field.
153 if data[test]["parent"] in suite_name:
155 for column in table["columns"]:
# Second token of column["data"] names the field of the test record.
157 col_data = str(data[test][column["data"].
158 split(" ")[1]]).replace('"', '""')
159 if column["data"].split(" ")[1] in ("vat-history",
# Strip " |br| " markers (string.replace is Python 2 only) and wrap for
# preformatted rST output; [:-5] presumably drops a trailing marker.
161 col_data = replace(col_data, " |br| ", "",
163 col_data = " |prein| {0} |preout| ".\
164 format(col_data[:-5])
165 row_lst.append('"{0}"'.format(col_data))
167 row_lst.append("No data")
168 table_lst.append(row_lst)
170 # Write the data to file
# One output file per suite: <output-file>_<suite><ext>.
172 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
173 table["output-file-ext"])
174 logging.info(" Writing file: '{}'".format(file_name))
175 with open(file_name, "w") as file_handler:
176 file_handler.write(",".join(header) + "\n")
177 for item in table_lst:
178 file_handler.write(",".join(item) + "\n")
180 logging.info(" Done.")
# Builds a CSV (and pretty-txt) table comparing a "reference" data set against
# a "compare" data set (optionally plus "history" columns), with per-test
# mean/stdev in Mpps and the relative change between reference and compare.
# NOTE(review): partial line-numbered extract — try/except scaffolding,
# several assignments and loop headers are on lines not visible here.
183 def table_performance_comparison(table, input_data):
184 """Generate the table(s) with algorithm: table_performance_comparison
185 specified in the specification file.
187 :param table: Table to generate.
188 :param input_data: Data to process.
189 :type table: pandas.Series
190 :type input_data: InputData
193 logging.info(" Generating the table {0} ...".
194 format(table.get("title", "")))
197 logging.info(" Creating the data set for the {0} '{1}'.".
198 format(table.get("type", ""), table.get("title", "")))
199 data = input_data.filter_data(table, continue_on_error=True)
201 # Prepare the header of the tables
203 header = ["Test case", ]
# MRR tables report receive rate; NDR/PDR tables report throughput.
205 if table["include-tests"] == "MRR":
206 hdr_param = "Receive Rate"
208 hdr_param = "Throughput"
210 history = table.get("history", None)
# Two columns (mean, stdev) per historical release, then the same pair for
# reference and compare titles.
214 ["{0} {1} [Mpps]".format(item["title"], hdr_param),
215 "{0} Stdev [Mpps]".format(item["title"])])
217 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
218 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
219 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
220 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
222 header_str = ",".join(header) + "\n"
223 except (AttributeError, KeyError) as err:
224 logging.error("The model is invalid, missing parameter: {0}".
228 # Prepare data to the table:
# Pass 1: collect reference-set samples keyed by a normalized test name
# (all NDR/PDR suffix variants stripped so runs of different test types
# aggregate under one key).
230 for job, builds in table["reference"]["data"].items():
232 for tst_name, tst_data in data[job][str(build)].iteritems():
233 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
234 replace("-ndrpdr", "").replace("-pdrdisc", "").\
235 replace("-ndrdisc", "").replace("-pdr", "").\
237 if tbl_dict.get(tst_name_mod, None) is None:
# Display name: first token of the parent suite + the test name tokens.
238 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
239 "-".join(tst_data["name"].
241 tbl_dict[tst_name_mod] = {"name": name,
245 # TODO: Re-work when NDRPDRDISC tests are not used
246 if table["include-tests"] == "MRR":
247 tbl_dict[tst_name_mod]["ref-data"]. \
248 append(tst_data["result"]["receive-rate"].avg)
249 elif table["include-tests"] == "PDR":
250 if tst_data["type"] == "PDR":
251 tbl_dict[tst_name_mod]["ref-data"]. \
252 append(tst_data["throughput"]["value"])
253 elif tst_data["type"] == "NDRPDR":
254 tbl_dict[tst_name_mod]["ref-data"].append(
255 tst_data["throughput"]["PDR"]["LOWER"])
256 elif table["include-tests"] == "NDR":
257 if tst_data["type"] == "NDR":
258 tbl_dict[tst_name_mod]["ref-data"]. \
259 append(tst_data["throughput"]["value"])
260 elif tst_data["type"] == "NDRPDR":
261 tbl_dict[tst_name_mod]["ref-data"].append(
262 tst_data["throughput"]["NDR"]["LOWER"])
266 pass # No data in output.xml for this test
# Pass 2: collect compare-set samples into the same per-test entries.
268 for job, builds in table["compare"]["data"].items():
270 for tst_name, tst_data in data[job][str(build)].iteritems():
271 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
272 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
273 replace("-ndrdisc", "").replace("-pdr", ""). \
276 # TODO: Re-work when NDRPDRDISC tests are not used
277 if table["include-tests"] == "MRR":
278 tbl_dict[tst_name_mod]["cmp-data"]. \
279 append(tst_data["result"]["receive-rate"].avg)
280 elif table["include-tests"] == "PDR":
281 if tst_data["type"] == "PDR":
282 tbl_dict[tst_name_mod]["cmp-data"]. \
283 append(tst_data["throughput"]["value"])
284 elif tst_data["type"] == "NDRPDR":
285 tbl_dict[tst_name_mod]["cmp-data"].append(
286 tst_data["throughput"]["PDR"]["LOWER"])
287 elif table["include-tests"] == "NDR":
288 if tst_data["type"] == "NDR":
289 tbl_dict[tst_name_mod]["cmp-data"]. \
290 append(tst_data["throughput"]["value"])
291 elif tst_data["type"] == "NDRPDR":
292 tbl_dict[tst_name_mod]["cmp-data"].append(
293 tst_data["throughput"]["NDR"]["LOWER"])
# Tests present in reference but absent from compare are dropped.
299 tbl_dict.pop(tst_name_mod, None)
# Pass 3 (optional): per historical release, append samples under
# tbl_dict[...]["history"][release-title].
302 for job, builds in item["data"].items():
304 for tst_name, tst_data in data[job][str(build)].iteritems():
305 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
306 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
307 replace("-ndrdisc", "").replace("-pdr", ""). \
309 if tbl_dict.get(tst_name_mod, None) is None:
311 if tbl_dict[tst_name_mod].get("history", None) is None:
312 tbl_dict[tst_name_mod]["history"] = OrderedDict()
313 if tbl_dict[tst_name_mod]["history"].get(item["title"],
315 tbl_dict[tst_name_mod]["history"][item["title"]] = \
318 # TODO: Re-work when NDRPDRDISC tests are not used
319 if table["include-tests"] == "MRR":
320 tbl_dict[tst_name_mod]["history"][item["title"
321 ]].append(tst_data["result"]["receive-rate"].
323 elif table["include-tests"] == "PDR":
324 if tst_data["type"] == "PDR":
325 tbl_dict[tst_name_mod]["history"][
327 append(tst_data["throughput"]["value"])
328 elif tst_data["type"] == "NDRPDR":
329 tbl_dict[tst_name_mod]["history"][item[
330 "title"]].append(tst_data["throughput"][
332 elif table["include-tests"] == "NDR":
333 if tst_data["type"] == "NDR":
334 tbl_dict[tst_name_mod]["history"][
336 append(tst_data["throughput"]["value"])
337 elif tst_data["type"] == "NDRPDR":
338 tbl_dict[tst_name_mod]["history"][item[
339 "title"]].append(tst_data["throughput"][
343 except (TypeError, KeyError):
# Assemble output rows: mean/stdev scaled from pps to Mpps (/ 1e6).
347 for tst_name in tbl_dict.keys():
348 item = [tbl_dict[tst_name]["name"], ]
350 if tbl_dict[tst_name].get("history", None) is not None:
351 for hist_data in tbl_dict[tst_name]["history"].values():
353 item.append(round(mean(hist_data) / 1000000, 2))
354 item.append(round(stdev(hist_data) / 1000000, 2))
356 item.extend([None, None])
358 item.extend([None, None])
359 data_t = tbl_dict[tst_name]["ref-data"]
361 item.append(round(mean(data_t) / 1000000, 2))
362 item.append(round(stdev(data_t) / 1000000, 2))
364 item.extend([None, None])
365 data_t = tbl_dict[tst_name]["cmp-data"]
367 item.append(round(mean(data_t) / 1000000, 2))
368 item.append(round(stdev(data_t) / 1000000, 2))
370 item.extend([None, None])
# item[-4] is the reference mean, item[-2] the compare mean; the relative
# change column is only appended when both exist and division is safe.
371 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
372 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
# Rows with missing columns (wrong length) are dropped here — TODO confirm
# the guarded append is on the invisible following line.
373 if len(item) == len(header):
376 # Sort the table according to the relative change
377 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
379 # Generate csv tables:
380 csv_file = "{0}.csv".format(table["output-file"])
381 with open(csv_file, "w") as file_handler:
382 file_handler.write(header_str)
384 file_handler.write(",".join([str(item) for item in test]) + "\n")
386 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
# Builds the trending dashboard CSV: per test, the latest trend value,
# short/long-term relative change, and counts of detected regressions and
# progressions within the short-term window.
# NOTE(review): partial line-numbered extract — list/dict initialisations and
# some loop/condition lines are not visible here.
389 def table_performance_trending_dashboard(table, input_data):
390 """Generate the table(s) with algorithm:
391 table_performance_trending_dashboard
392 specified in the specification file.
394 :param table: Table to generate.
395 :param input_data: Data to process.
396 :type table: pandas.Series
397 :type input_data: InputData
400 logging.info(" Generating the table {0} ...".
401 format(table.get("title", "")))
404 logging.info(" Creating the data set for the {0} '{1}'.".
405 format(table.get("type", ""), table.get("title", "")))
406 data = input_data.filter_data(table, continue_on_error=True)
408 # Prepare the header of the tables
409 header = ["Test Case",
411 "Short-Term Change [%]",
412 "Long-Term Change [%]",
416 header_str = ",".join(header) + "\n"
418 # Prepare data to the table:
# Gather per-test receive-rate samples keyed by build, skipping tests on the
# spec's ignore-list.
420 for job, builds in table["data"].items():
422 for tst_name, tst_data in data[job][str(build)].iteritems():
423 if tst_name.lower() in table["ignore-list"]:
425 if tbl_dict.get(tst_name, None) is None:
426 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
428 tbl_dict[tst_name] = {"name": name,
429 "data": OrderedDict()}
431 tbl_dict[tst_name]["data"][str(build)] = \
432 tst_data["result"]["receive-rate"]
433 except (TypeError, KeyError):
434 pass # No data in output.xml for this test
437 for tst_name in tbl_dict.keys():
438 data_t = tbl_dict[tst_name]["data"]
# classify_anomalies labels each sample (e.g. "regression"/"progression"/
# "normal") and returns the trend averages used below.
442 classification_lst, avgs = classify_anomalies(data_t)
444 win_size = min(len(data_t), table["window"])
445 long_win_size = min(len(data_t), table["long-trend-window"])
449 [x for x in avgs[-long_win_size:-win_size]
# Short-term baseline: the trend average one window ago (clamped to the
# start of the series).
454 avg_week_ago = avgs[max(-win_size, -len(avgs))]
456 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
457 rel_change_last = nan
459 rel_change_last = round(
460 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
462 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
463 rel_change_long = nan
465 rel_change_long = round(
466 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
468 if classification_lst:
# Tests with no computable change in either horizon are skipped (the
# continue is on an invisible line); NaNs render as '-'.
469 if isnan(rel_change_last) and isnan(rel_change_long):
472 [tbl_dict[tst_name]["name"],
473 '-' if isnan(last_avg) else
474 round(last_avg / 1000000, 2),
475 '-' if isnan(rel_change_last) else rel_change_last,
476 '-' if isnan(rel_change_long) else rel_change_long,
477 classification_lst[-win_size:].count("regression"),
478 classification_lst[-win_size:].count("progression")])
480 tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-key ordering: most regressions first, then most progressions,
# then short-term change — implemented by bucketed passes over the list.
483 for nrr in range(table["window"], -1, -1):
484 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
485 for nrp in range(table["window"], -1, -1):
486 tbl_out = [item for item in tbl_reg if item[5] == nrp]
# NOTE(review): '-' strings sort with numbers here under Python 2 rules;
# this relies on Python 2 mixed-type comparison semantics.
487 tbl_out.sort(key=lambda rel: rel[2])
488 tbl_sorted.extend(tbl_out)
490 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
492 logging.info(" Writing file: '{0}'".format(file_name))
493 with open(file_name, "w") as file_handler:
494 file_handler.write(header_str)
495 for test in tbl_sorted:
496 file_handler.write(",".join([str(item) for item in test]) + '\n')
498 txt_file_name = "{0}.txt".format(table["output-file"])
499 logging.info(" Writing file: '{0}'".format(txt_file_name))
500 convert_csv_to_pretty_txt(file_name, txt_file_name)
# Maps a test-case name to the trending-plot URL by substring matching on
# test-bed features (plot page), NIC model, frame size and core count.
# NOTE(review): partial line-numbered extract — the initialisations of url/
# file_name/anchor/feature and the assignment bodies of the NIC / frame-size /
# core-count branches are on lines not visible here.
503 def _generate_url(base, test_name):
504 """Generate URL to a trending plot from the name of the test case.
506 :param base: The base part of URL common to all test cases.
507 :param test_name: The name of the test case.
510 :returns: The URL to the plot with the trending data for the given test
# Branch order matters: earlier, more specific substrings (e.g. "lbvpp",
# "vhost") must win over the generic ip4/l2 fallbacks below.
520 if "lbdpdk" in test_name or "lbvpp" in test_name:
521 file_name = "link_bonding.html"
523 elif "testpmd" in test_name or "l3fwd" in test_name:
524 file_name = "dpdk.html"
526 elif "memif" in test_name:
527 file_name = "container_memif.html"
529 elif "srv6" in test_name:
530 file_name = "srv6.html"
532 elif "vhost" in test_name:
533 if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
534 file_name = "vm_vhost_l2.html"
535 elif "ip4base" in test_name:
536 file_name = "vm_vhost_ip4.html"
538 elif "ipsec" in test_name:
539 file_name = "ipsec.html"
541 elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
542 file_name = "ip4_tunnels.html"
544 elif "ip4base" in test_name or "ip4scale" in test_name:
545 file_name = "ip4.html"
# Feature-suffixed pages exist for ACL/NAT/COP variants.
546 if "iacl" in test_name or "snat" in test_name or "cop" in test_name:
547 feature = "-features"
549 elif "ip6base" in test_name or "ip6scale" in test_name:
550 file_name = "ip6.html"
552 elif "l2xcbase" in test_name or "l2xcscale" in test_name \
553 or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
554 or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
555 file_name = "l2.html"
556 if "iacl" in test_name:
557 feature = "-features"
# NIC, frame-size and core-count checks presumably build the anchor string;
# their assignment lines are not visible in this extract — TODO confirm.
559 if "x520" in test_name:
561 elif "x710" in test_name:
563 elif "xl710" in test_name:
566 if "64b" in test_name:
568 elif "78b" in test_name:
570 elif "imix" in test_name:
572 elif "9000b" in test_name:
574 elif "1518" in test_name:
577 if "1t1c" in test_name:
579 elif "2t2c" in test_name:
581 elif "4t4c" in test_name:
584 return url + file_name + anchor + feature
# Renders the dashboard CSV produced by table_performance_trending_dashboard
# into an HTML table (embedded in an rST ".. raw:: html" block), colouring
# rows by regression/progression state and linking test names to their
# trending plots.
# NOTE(review): partial line-numbered extract — try/except scaffolding and
# some element-text assignments are on lines not visible here.
587 def table_performance_trending_dashboard_html(table, input_data):
588 """Generate the table(s) with algorithm:
589 table_performance_trending_dashboard_html specified in the specification
592 :param table: Table to generate.
593 :param input_data: Data to process.
594 :type table: pandas.Series
595 :type input_data: InputData
598 logging.info(" Generating the table {0} ...".
599 format(table.get("title", "")))
# 'rb' is fine for csv.reader on Python 2; on Python 3 this would need
# newline='' text mode instead.
602 with open(table["input-file"], 'rb') as csv_file:
603 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
604 csv_lst = [item for item in csv_content]
606 logging.warning("The input file is not defined.")
608 except csv.Error as err:
609 logging.warning("Not possible to process the file '{0}'.\n{1}".
610 format(table["input-file"], err))
614 dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row from the CSV's first line; first column left-aligned.
617 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
618 for idx, item in enumerate(csv_lst[0]):
619 alignment = "left" if idx == 0 else "center"
620 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Two shades per state to alternate row striping within a colour class.
624 colors = {"regression": ("#ffcccc", "#ff9999"),
625 "progression": ("#c6ecc6", "#9fdf9f"),
626 "normal": ("#e9f1fb", "#d4e4f7")}
627 for r_idx, row in enumerate(csv_lst[1:]):
# Colour selection logic (regression/normal cases) is on invisible lines.
631 color = "progression"
634 background = colors[color][r_idx % 2]
635 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
638 for c_idx, item in enumerate(row):
639 alignment = "left" if c_idx == 0 else "center"
640 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# First cell gets a hyperlink to the per-test trending plot.
643 url = _generate_url("../trending/", item)
644 ref = ET.SubElement(td, "a", attrib=dict(href=url))
649 with open(table["output-file"], 'w') as html_file:
650 logging.info(" Writing file: '{0}'".format(table["output-file"]))
651 html_file.write(".. raw:: html\n\n\t")
652 html_file.write(ET.tostring(dashboard))
653 html_file.write("\n\t<p><br><br></p>\n")
655 logging.warning("The output file is not defined.")
# Produces the failed-tests summary CSV: per test, how often it failed inside
# the sliding window plus the time/VPP build/CSIT build of the last failure.
# NOTE(review): partial line-numbered extract — counters, try/except and some
# tuple fields are on lines not visible here.
659 def table_failed_tests(table, input_data):
660 """Generate the table(s) with algorithm: table_failed_tests
661 specified in the specification file.
663 :param table: Table to generate.
664 :param input_data: Data to process.
665 :type table: pandas.Series
666 :type input_data: InputData
669 logging.info(" Generating the table {0} ...".
670 format(table.get("title", "")))
673 logging.info(" Creating the data set for the {0} '{1}'.".
674 format(table.get("type", ""), table.get("title", "")))
675 data = input_data.filter_data(table, continue_on_error=True)
677 # Prepare the header of the tables
678 header = ["Test Case",
680 "Last Failure [Time]",
681 "Last Failure [VPP-Build-Id]",
682 "Last Failure [CSIT-Job-Build-Id]"]
684 # Generate the data for the table according to the model in the table
# Per test, record a per-build tuple of (status?, generated-time, vpp
# version, build id) — first tuple element is on an invisible line.
687 for job, builds in table["data"].items():
690 for tst_name, tst_data in data[job][build].iteritems():
691 if tst_name.lower() in table["ignore-list"]:
693 if tbl_dict.get(tst_name, None) is None:
694 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
696 tbl_dict[tst_name] = {"name": name,
697 "data": OrderedDict()}
699 tbl_dict[tst_name]["data"][build] = (
701 input_data.metadata(job, build).get("generated", ""),
702 input_data.metadata(job, build).get("version", ""),
704 except (TypeError, KeyError):
705 pass # No data in output.xml for this test
708 for tst_data in tbl_dict.values():
709 win_size = min(len(tst_data["data"]), table["window"])
# Python 2: OrderedDict.values() is a list, so negative slicing walks only
# the most recent win_size builds.
711 for val in tst_data["data"].values()[-win_size:]:
# The failure check and counter increment are on invisible lines; these keep
# the metadata of the most recent failure seen.
714 fails_last_date = val[1]
715 fails_last_vpp = val[2]
716 fails_last_csit = val[3]
718 tbl_lst.append([tst_data["name"],
# CSIT build id is rendered with the job-name prefix.
722 "mrr-daily-build-{0}".format(fails_last_csit)])
# Sort newest failure first, then bucket by failure count descending.
724 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
726 for nrf in range(table["window"], -1, -1):
727 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
728 tbl_sorted.extend(tbl_fails)
729 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
731 logging.info(" Writing file: '{0}'".format(file_name))
732 with open(file_name, "w") as file_handler:
733 file_handler.write(",".join(header) + "\n")
734 for test in tbl_sorted:
735 file_handler.write(",".join([str(item) for item in test]) + '\n')
737 txt_file_name = "{0}.txt".format(table["output-file"])
738 logging.info(" Writing file: '{0}'".format(txt_file_name))
739 convert_csv_to_pretty_txt(file_name, txt_file_name)
# Renders the failed-tests CSV as an HTML table inside an rST raw block.
# Structure mirrors table_performance_trending_dashboard_html but with plain
# alternating row colours (no anomaly colouring) — keep the two in sync.
# NOTE(review): partial line-numbered extract — try/except scaffolding and
# element-text assignments are on lines not visible here.
742 def table_failed_tests_html(table, input_data):
743 """Generate the table(s) with algorithm: table_failed_tests_html
744 specified in the specification file.
746 :param table: Table to generate.
747 :param input_data: Data to process.
748 :type table: pandas.Series
749 :type input_data: InputData
752 logging.info(" Generating the table {0} ...".
753 format(table.get("title", "")))
# 'rb' works with csv.reader on Python 2 only.
756 with open(table["input-file"], 'rb') as csv_file:
757 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
758 csv_lst = [item for item in csv_content]
760 logging.warning("The input file is not defined.")
762 except csv.Error as err:
763 logging.warning("Not possible to process the file '{0}'.\n{1}".
764 format(table["input-file"], err))
768 failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row from the CSV's first line; first column left-aligned.
771 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
772 for idx, item in enumerate(csv_lst[0]):
773 alignment = "left" if idx == 0 else "center"
774 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Simple two-colour striping for data rows.
778 colors = ("#e9f1fb", "#d4e4f7")
779 for r_idx, row in enumerate(csv_lst[1:]):
780 background = colors[r_idx % 2]
781 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
784 for c_idx, item in enumerate(row):
785 alignment = "left" if c_idx == 0 else "center"
786 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# First cell links to the per-test trending plot.
789 url = _generate_url("../trending/", item)
790 ref = ET.SubElement(td, "a", attrib=dict(href=url))
795 with open(table["output-file"], 'w') as html_file:
796 logging.info(" Writing file: '{0}'".format(table["output-file"]))
797 html_file.write(".. raw:: html\n\n\t")
798 html_file.write(ET.tostring(failed_tests))
799 html_file.write("\n\t<p><br><br></p>\n")
801 logging.warning("The output file is not defined.")