1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30 convert_csv_to_pretty_txt, relative_change_stdev
# Pre-compiled pattern extracting the NIC token from a test's parent/suite
# name, e.g. "10ge2p1x710" (speed digits + "ge" + port count + "p" + position
# + non-digit model chars + trailing digits) -- TODO confirm against tag data.
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
# NOTE(review): this listing is sampled -- the embedded original line numbers
# skip (e.g. the "try:" that must precede line 48 is not visible), so the
# comments below describe only what the visible lines establish.
36 def generate_tables(spec, data):
37 """Generate all tables specified in the specification file.
39 :param spec: Specification read from the specification file.
40 :param data: Data to process.
41 :type spec: Specification
45 logging.info("Generating the tables ...")
# Each table entry names its generator; the name is resolved with eval()
# and invoked as f(table, data).
46 for table in spec.tables:
48 eval(table["algorithm"])(table, data)
# eval() raises NameError when the spec names an algorithm not defined in
# this module; log the problem and continue with the next table.
# SECURITY NOTE(review): eval() on a spec-supplied string executes
# arbitrary code -- acceptable only for trusted specification files.
49 except NameError as err:
50 logging.error("Probably algorithm '{alg}' is not defined: {err}".
51 format(alg=table["algorithm"], err=repr(err)))
# NOTE(review): sampled listing -- several original lines (docstring closer,
# "header = list()" initialization, try/except framing) are not visible;
# annotations cover only the visible statements.
55 def table_details(table, input_data):
56 """Generate the table(s) with algorithm: table_detailed_test_results
57 specified in the specification file.
59 :param table: Table to generate.
60 :param input_data: Data to process.
61 :type table: pandas.Series
62 :type input_data: InputData
65 logging.info(" Generating the table {0} ...".
66 format(table.get("title", "")))
69 logging.info(" Creating the data set for the {0} '{1}'.".
70 format(table.get("type", ""), table.get("title", "")))
71 data = input_data.filter_data(table)
73 # Prepare the header of the tables
# Column titles are CSV-quoted; embedded double quotes are doubled
# (standard CSV escaping).
75 for column in table["columns"]:
76 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
78 # Generate the data for the table according to the model in the table
# Python 2 idiom: dict.keys()[0] picks the first job; only the first build
# of that job feeds this detailed table.
80 job = table["data"].keys()[0]
81 build = str(table["data"][job][0])
83 suites = input_data.suites(job, build)
85 logging.error(" No data available. The table will not be generated.")
# One output file is produced per suite (Python 2 iteritems()).
88 for suite_longname, suite in suites.iteritems():
90 suite_name = suite["name"]
92 for test in data[job][build].keys():
93 if data[job][build][test]["parent"] in suite_name:
95 for column in table["columns"]:
# column["data"] looks like "<op> <key>"; the second token is the key
# looked up in the test record -- TODO confirm against the spec format.
97 col_data = str(data[job][build][test][column["data"].
98 split(" ")[1]]).replace('"', '""')
99 if column["data"].split(" ")[1] in ("conf-history",
# History-style columns: strip " |br| " separators (Py2 string.replace)
# and wrap the value in |prein|/|preout| rST markers; the [:-5] slice
# presumably drops a trailing marker fragment -- intent partly cut off
# in this sampled listing.
101 col_data = replace(col_data, " |br| ", "",
103 col_data = " |prein| {0} |preout| ".\
104 format(col_data[:-5])
105 row_lst.append('"{0}"'.format(col_data))
107 row_lst.append("No data")
108 table_lst.append(row_lst)
110 # Write the data to file
112 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
113 table["output-file-ext"])
114 logging.info(" Writing file: '{}'".format(file_name))
115 with open(file_name, "w") as file_handler:
116 file_handler.write(",".join(header) + "\n")
117 for item in table_lst:
118 file_handler.write(",".join(item) + "\n")
120 logging.info(" Done.")
# NOTE(review): sampled listing -- interior lines are missing; comments below
# describe only what the visible lines establish.  Mirrors table_details()
# but first merges data across jobs/builds into a single data set.
123 def table_merged_details(table, input_data):
124 """Generate the table(s) with algorithm: table_merged_details
125 specified in the specification file.
127 :param table: Table to generate.
128 :param input_data: Data to process.
129 :type table: pandas.Series
130 :type input_data: InputData
133 logging.info(" Generating the table {0} ...".
134 format(table.get("title", "")))
137 logging.info(" Creating the data set for the {0} '{1}'.".
138 format(table.get("type", ""), table.get("title", "")))
# Merge results from all selected jobs/builds, then sort for stable output.
139 data = input_data.filter_data(table)
140 data = input_data.merge_data(data)
141 data.sort_index(inplace=True)
143 logging.info(" Creating the data set for the {0} '{1}'.".
144 format(table.get("type", ""), table.get("title", "")))
145 suites = input_data.filter_data(table, data_set="suites")
146 suites = input_data.merge_data(suites)
148 # Prepare the header of the tables
# CSV-quote titles; embedded double quotes are doubled.
150 for column in table["columns"]:
151 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
# One output file per suite (Python 2 iteritems()).
153 for _, suite in suites.iteritems():
155 suite_name = suite["name"]
157 for test in data.keys():
158 if data[test]["parent"] in suite_name:
160 for column in table["columns"]:
# Second token of column["data"] is the key into the test record --
# TODO confirm against the spec format.
162 col_data = str(data[test][column["data"].
163 split(" ")[1]]).replace('"', '""')
# Normalize placeholder text (Py2 string.replace; replacement argument
# is on a line not visible in this sampled listing).
164 col_data = replace(col_data, "No Data",
166 if column["data"].split(" ")[1] in ("conf-history",
# History-style columns: strip " |br| " and wrap in |prein|/|preout|.
168 col_data = replace(col_data, " |br| ", "",
170 col_data = " |prein| {0} |preout| ".\
171 format(col_data[:-5])
172 row_lst.append('"{0}"'.format(col_data))
174 row_lst.append('"Not captured"')
175 table_lst.append(row_lst)
177 # Write the data to file
179 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
180 table["output-file-ext"])
181 logging.info(" Writing file: '{}'".format(file_name))
182 with open(file_name, "w") as file_handler:
183 file_handler.write(",".join(header) + "\n")
184 for item in table_lst:
185 file_handler.write(",".join(item) + "\n")
187 logging.info(" Done.")
# NOTE(review): sampled listing -- try/except framing, dict-literal tails and
# some expression continuations are on lines not visible here; comments are
# limited to what the visible lines establish.
190 def table_performance_comparison(table, input_data):
191 """Generate the table(s) with algorithm: table_performance_comparison
192 specified in the specification file.
194 :param table: Table to generate.
195 :param input_data: Data to process.
196 :type table: pandas.Series
197 :type input_data: InputData
200 logging.info(" Generating the table {0} ...".
201 format(table.get("title", "")))
204 logging.info(" Creating the data set for the {0} '{1}'.".
205 format(table.get("type", ""), table.get("title", "")))
206 data = input_data.filter_data(table, continue_on_error=True)
208 # Prepare the header of the tables
210 header = ["Test case", ]
# Header label depends on the metric; MRR reports a received rate.
212 if table["include-tests"] == "MRR":
213 hdr_param = "Rec Rate"
# Optional historical release columns precede the reference/compare pair.
217 history = table.get("history", None)
221 ["{0} {1} [Mpps]".format(item["title"], hdr_param),
222 "{0} Stdev [Mpps]".format(item["title"])])
224 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
225 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
226 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
227 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
229 header_str = ",".join(header) + "\n"
230 except (AttributeError, KeyError) as err:
231 logging.error("The model is invalid, missing parameter: {0}".
235 # Prepare data to the table:
# --- Reference data set -------------------------------------------------
237 for job, builds in table["reference"]["data"].items():
# Remember whether this is a 2n-skx job; used for the dot1q footnote below.
238 topo = "2n-skx" if "2n-skx" in job else ""
240 for tst_name, tst_data in data[job][str(build)].iteritems():
# Canonicalize the test name: drop the test-type suffix and map
# thread-count tokens (e.g. 2t1c) to core counts (1c/2c/4c) so results
# from different runs key to the same entry.
241 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
242 replace("-ndrpdr", "").replace("-pdrdisc", "").\
243 replace("-ndrdisc", "").replace("-pdr", "").\
244 replace("-ndr", "").\
245 replace("1t1c", "1c").replace("2t1c", "1c").\
246 replace("2t2c", "2c").replace("4t2c", "2c").\
247 replace("4t4c", "4c").replace("8t4c", "4c")
248 if "across topologies" in table["title"].lower():
249 tst_name_mod = tst_name_mod.replace("2n1l-", "")
250 if tbl_dict.get(tst_name_mod, None) is None:
# Display name is prefixed with the NIC token from the parent suite.
251 groups = re.search(REGEX_NIC, tst_data["parent"])
252 nic = groups.group(0) if groups else ""
253 name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
255 if "across testbeds" in table["title"].lower() or \
256 "across topologies" in table["title"].lower():
258 replace("1t1c", "1c").replace("2t1c", "1c").\
259 replace("2t2c", "2c").replace("4t2c", "2c").\
260 replace("4t4c", "4c").replace("8t4c", "4c")
261 tbl_dict[tst_name_mod] = {"name": name,
265 # TODO: Re-work when NDRPDRDISC tests are not used
# Append the reference sample according to the selected metric.
266 if table["include-tests"] == "MRR":
267 tbl_dict[tst_name_mod]["ref-data"]. \
268 append(tst_data["result"]["receive-rate"].avg)
269 elif table["include-tests"] == "PDR":
270 if tst_data["type"] == "PDR":
271 tbl_dict[tst_name_mod]["ref-data"]. \
272 append(tst_data["throughput"]["value"])
273 elif tst_data["type"] == "NDRPDR":
274 tbl_dict[tst_name_mod]["ref-data"].append(
275 tst_data["throughput"]["PDR"]["LOWER"])
276 elif table["include-tests"] == "NDR":
277 if tst_data["type"] == "NDR":
278 tbl_dict[tst_name_mod]["ref-data"]. \
279 append(tst_data["throughput"]["value"])
280 elif tst_data["type"] == "NDRPDR":
281 tbl_dict[tst_name_mod]["ref-data"].append(
282 tst_data["throughput"]["NDR"]["LOWER"])
286 pass # No data in output.xml for this test
# --- Compare data set (same normalization as the reference loop) --------
288 for job, builds in table["compare"]["data"].items():
290 for tst_name, tst_data in data[job][str(build)].iteritems():
291 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
292 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
293 replace("-ndrdisc", "").replace("-pdr", ""). \
294 replace("-ndr", "").\
295 replace("1t1c", "1c").replace("2t1c", "1c").\
296 replace("2t2c", "2c").replace("4t2c", "2c").\
297 replace("4t4c", "4c").replace("8t4c", "4c")
298 if "across topologies" in table["title"].lower():
299 tst_name_mod = tst_name_mod.replace("2n1l-", "")
300 if tbl_dict.get(tst_name_mod, None) is None:
301 groups = re.search(REGEX_NIC, tst_data["parent"])
302 nic = groups.group(0) if groups else ""
303 name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
305 if "across testbeds" in table["title"].lower() or \
306 "across topologies" in table["title"].lower():
308 replace("1t1c", "1c").replace("2t1c", "1c").\
309 replace("2t2c", "2c").replace("4t2c", "2c").\
310 replace("4t4c", "4c").replace("8t4c", "4c")
311 tbl_dict[tst_name_mod] = {"name": name,
315 # TODO: Re-work when NDRPDRDISC tests are not used
316 if table["include-tests"] == "MRR":
317 tbl_dict[tst_name_mod]["cmp-data"]. \
318 append(tst_data["result"]["receive-rate"].avg)
319 elif table["include-tests"] == "PDR":
320 if tst_data["type"] == "PDR":
321 tbl_dict[tst_name_mod]["cmp-data"]. \
322 append(tst_data["throughput"]["value"])
323 elif tst_data["type"] == "NDRPDR":
324 tbl_dict[tst_name_mod]["cmp-data"].append(
325 tst_data["throughput"]["PDR"]["LOWER"])
326 elif table["include-tests"] == "NDR":
327 if tst_data["type"] == "NDR":
328 tbl_dict[tst_name_mod]["cmp-data"]. \
329 append(tst_data["throughput"]["value"])
330 elif tst_data["type"] == "NDRPDR":
331 tbl_dict[tst_name_mod]["cmp-data"].append(
332 tst_data["throughput"]["NDR"]["LOWER"])
335 except (KeyError, TypeError):
# --- Optional historical releases: one sample list per release title ----
339 for job, builds in item["data"].items():
341 for tst_name, tst_data in data[job][str(build)].iteritems():
342 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
343 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
344 replace("-ndrdisc", "").replace("-pdr", ""). \
345 replace("-ndr", "").\
346 replace("1t1c", "1c").replace("2t1c", "1c").\
347 replace("2t2c", "2c").replace("4t2c", "2c").\
348 replace("4t4c", "4c").replace("8t4c", "4c")
349 if "across topologies" in table["title"].lower():
350 tst_name_mod = tst_name_mod.replace("2n1l-", "")
351 if tbl_dict.get(tst_name_mod, None) is None:
# Lazily create the per-release history container.
353 if tbl_dict[tst_name_mod].get("history", None) is None:
354 tbl_dict[tst_name_mod]["history"] = OrderedDict()
355 if tbl_dict[tst_name_mod]["history"].get(item["title"],
357 tbl_dict[tst_name_mod]["history"][item["title"]] = \
360 # TODO: Re-work when NDRPDRDISC tests are not used
361 if table["include-tests"] == "MRR":
362 tbl_dict[tst_name_mod]["history"][item["title"
363 ]].append(tst_data["result"]["receive-rate"].
365 elif table["include-tests"] == "PDR":
366 if tst_data["type"] == "PDR":
367 tbl_dict[tst_name_mod]["history"][
369 append(tst_data["throughput"]["value"])
370 elif tst_data["type"] == "NDRPDR":
371 tbl_dict[tst_name_mod]["history"][item[
372 "title"]].append(tst_data["throughput"][
374 elif table["include-tests"] == "NDR":
375 if tst_data["type"] == "NDR":
376 tbl_dict[tst_name_mod]["history"][
378 append(tst_data["throughput"]["value"])
379 elif tst_data["type"] == "NDRPDR":
380 tbl_dict[tst_name_mod]["history"][item[
381 "title"]].append(tst_data["throughput"][
385 except (TypeError, KeyError):
# --- Build output rows: name, [history mean/stdev ...], ref mean/stdev,
# cmp mean/stdev, then a delta/remark column.  Values are scaled to Mpps.
390 for tst_name in tbl_dict.keys():
391 item = [tbl_dict[tst_name]["name"], ]
393 if tbl_dict[tst_name].get("history", None) is not None:
394 for hist_data in tbl_dict[tst_name]["history"].values():
396 item.append(round(mean(hist_data) / 1000000, 2))
397 item.append(round(stdev(hist_data) / 1000000, 2))
399 item.extend(["Not tested", "Not tested"])
401 item.extend(["Not tested", "Not tested"])
402 data_t = tbl_dict[tst_name]["ref-data"]
404 item.append(round(mean(data_t) / 1000000, 2))
405 item.append(round(stdev(data_t) / 1000000, 2))
407 item.extend(["Not tested", "Not tested"])
408 data_t = tbl_dict[tst_name]["cmp-data"]
410 item.append(round(mean(data_t) / 1000000, 2))
411 item.append(round(stdev(data_t) / 1000000, 2))
413 item.extend(["Not tested", "Not tested"])
# At this point item[-4]/item[-2] are the reference/compare means.
414 if item[-2] == "Not tested":
416 elif item[-4] == "Not tested":
417 item.append("New in CSIT-1908")
418 elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
419 item.append("See footnote [1]")
# Relative change [%] between reference and compare means.
422 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
423 if (len(item) == len(header)) and (item[-3] != "Not tested"):
426 # Sort the table according to the relative change
427 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
429 # Generate csv tables:
430 csv_file = "{0}.csv".format(table["output-file"])
431 with open(csv_file, "w") as file_handler:
432 file_handler.write(header_str)
434 file_handler.write(",".join([str(item) for item in test]) + "\n")
436 txt_file_name = "{0}.txt".format(table["output-file"])
437 convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Append the dot1q methodology footnote referenced by "See footnote [1]".
440 with open(txt_file_name, 'a') as txt_file:
441 txt_file.writelines([
443 "[1] CSIT-1908 changed test methodology of dot1q tests in "
444 "2n-skx testbeds, dot1q encapsulation is now used on both "
446 " Previously dot1q was used only on a single link with the "
447 "other link carrying untagged Ethernet frames. This change "
449 " in slightly lower throughput in CSIT-1908 for these "
450 "tests. See release notes."
# NOTE(review): sampled listing (interior lines missing).  This function is a
# near-duplicate of table_performance_comparison() with two differences
# visible here: tests are filtered by a NIC tag (table[...]["nic"] must be in
# tst_data["tags"]) and the NIC token is stripped from the canonical test name
# via re.sub(REGEX_NIC, ...), so different NICs key to the same row.
# NOTE(review): the shared normalization/collection logic is a candidate for
# extraction into a common helper once the full file is available.
454 def table_performance_comparison_nic(table, input_data):
455 """Generate the table(s) with algorithm: table_performance_comparison
456 specified in the specification file.
458 :param table: Table to generate.
459 :param input_data: Data to process.
460 :type table: pandas.Series
461 :type input_data: InputData
464 logging.info(" Generating the table {0} ...".
465 format(table.get("title", "")))
468 logging.info(" Creating the data set for the {0} '{1}'.".
469 format(table.get("type", ""), table.get("title", "")))
470 data = input_data.filter_data(table, continue_on_error=True)
472 # Prepare the header of the tables
474 header = ["Test case", ]
476 if table["include-tests"] == "MRR":
477 hdr_param = "Rec Rate"
481 history = table.get("history", None)
485 ["{0} {1} [Mpps]".format(item["title"], hdr_param),
486 "{0} Stdev [Mpps]".format(item["title"])])
488 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
489 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
490 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
491 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
493 header_str = ",".join(header) + "\n"
494 except (AttributeError, KeyError) as err:
495 logging.error("The model is invalid, missing parameter: {0}".
499 # Prepare data to the table:
# --- Reference data set, restricted to the reference NIC ----------------
501 for job, builds in table["reference"]["data"].items():
502 topo = "2n-skx" if "2n-skx" in job else ""
504 for tst_name, tst_data in data[job][str(build)].iteritems():
# Skip tests not run on the reference NIC.
505 if table["reference"]["nic"] not in tst_data["tags"]:
# Canonicalize: drop test-type suffix, map thread counts to core counts,
# then remove the NIC token so NIC variants share one key.
507 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
508 replace("-ndrpdr", "").replace("-pdrdisc", "").\
509 replace("-ndrdisc", "").replace("-pdr", "").\
510 replace("-ndr", "").\
511 replace("1t1c", "1c").replace("2t1c", "1c").\
512 replace("2t2c", "2c").replace("4t2c", "2c").\
513 replace("4t4c", "4c").replace("8t4c", "4c")
514 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
515 if "across topologies" in table["title"].lower():
516 tst_name_mod = tst_name_mod.replace("2n1l-", "")
517 if tbl_dict.get(tst_name_mod, None) is None:
# Unlike table_performance_comparison(), no NIC prefix in the name.
518 name = "{0}".format("-".join(tst_data["name"].
520 if "across testbeds" in table["title"].lower() or \
521 "across topologies" in table["title"].lower():
523 replace("1t1c", "1c").replace("2t1c", "1c").\
524 replace("2t2c", "2c").replace("4t2c", "2c").\
525 replace("4t4c", "4c").replace("8t4c", "4c")
526 tbl_dict[tst_name_mod] = {"name": name,
530 # TODO: Re-work when NDRPDRDISC tests are not used
531 if table["include-tests"] == "MRR":
532 tbl_dict[tst_name_mod]["ref-data"]. \
533 append(tst_data["result"]["receive-rate"].avg)
534 elif table["include-tests"] == "PDR":
535 if tst_data["type"] == "PDR":
536 tbl_dict[tst_name_mod]["ref-data"]. \
537 append(tst_data["throughput"]["value"])
538 elif tst_data["type"] == "NDRPDR":
539 tbl_dict[tst_name_mod]["ref-data"].append(
540 tst_data["throughput"]["PDR"]["LOWER"])
541 elif table["include-tests"] == "NDR":
542 if tst_data["type"] == "NDR":
543 tbl_dict[tst_name_mod]["ref-data"]. \
544 append(tst_data["throughput"]["value"])
545 elif tst_data["type"] == "NDRPDR":
546 tbl_dict[tst_name_mod]["ref-data"].append(
547 tst_data["throughput"]["NDR"]["LOWER"])
551 pass # No data in output.xml for this test
# --- Compare data set, restricted to the compare NIC --------------------
553 for job, builds in table["compare"]["data"].items():
555 for tst_name, tst_data in data[job][str(build)].iteritems():
556 if table["compare"]["nic"] not in tst_data["tags"]:
558 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
559 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
560 replace("-ndrdisc", "").replace("-pdr", ""). \
561 replace("-ndr", "").\
562 replace("1t1c", "1c").replace("2t1c", "1c").\
563 replace("2t2c", "2c").replace("4t2c", "2c").\
564 replace("4t4c", "4c").replace("8t4c", "4c")
565 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
566 if "across topologies" in table["title"].lower():
567 tst_name_mod = tst_name_mod.replace("2n1l-", "")
568 if tbl_dict.get(tst_name_mod, None) is None:
569 name = "{0}".format("-".join(tst_data["name"].
571 if "across testbeds" in table["title"].lower() or \
572 "across topologies" in table["title"].lower():
574 replace("1t1c", "1c").replace("2t1c", "1c").\
575 replace("2t2c", "2c").replace("4t2c", "2c").\
576 replace("4t4c", "4c").replace("8t4c", "4c")
577 tbl_dict[tst_name_mod] = {"name": name,
581 # TODO: Re-work when NDRPDRDISC tests are not used
582 if table["include-tests"] == "MRR":
583 tbl_dict[tst_name_mod]["cmp-data"]. \
584 append(tst_data["result"]["receive-rate"].avg)
585 elif table["include-tests"] == "PDR":
586 if tst_data["type"] == "PDR":
587 tbl_dict[tst_name_mod]["cmp-data"]. \
588 append(tst_data["throughput"]["value"])
589 elif tst_data["type"] == "NDRPDR":
590 tbl_dict[tst_name_mod]["cmp-data"].append(
591 tst_data["throughput"]["PDR"]["LOWER"])
592 elif table["include-tests"] == "NDR":
593 if tst_data["type"] == "NDR":
594 tbl_dict[tst_name_mod]["cmp-data"]. \
595 append(tst_data["throughput"]["value"])
596 elif tst_data["type"] == "NDRPDR":
597 tbl_dict[tst_name_mod]["cmp-data"].append(
598 tst_data["throughput"]["NDR"]["LOWER"])
601 except (KeyError, TypeError):
# --- Optional historical releases, filtered by each item's NIC ----------
606 for job, builds in item["data"].items():
608 for tst_name, tst_data in data[job][str(build)].iteritems():
609 if item["nic"] not in tst_data["tags"]:
611 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
612 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
613 replace("-ndrdisc", "").replace("-pdr", ""). \
614 replace("-ndr", "").\
615 replace("1t1c", "1c").replace("2t1c", "1c").\
616 replace("2t2c", "2c").replace("4t2c", "2c").\
617 replace("4t4c", "4c").replace("8t4c", "4c")
618 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
619 if "across topologies" in table["title"].lower():
620 tst_name_mod = tst_name_mod.replace("2n1l-", "")
621 if tbl_dict.get(tst_name_mod, None) is None:
623 if tbl_dict[tst_name_mod].get("history", None) is None:
624 tbl_dict[tst_name_mod]["history"] = OrderedDict()
625 if tbl_dict[tst_name_mod]["history"].get(item["title"],
627 tbl_dict[tst_name_mod]["history"][item["title"]] = \
630 # TODO: Re-work when NDRPDRDISC tests are not used
631 if table["include-tests"] == "MRR":
632 tbl_dict[tst_name_mod]["history"][item["title"
633 ]].append(tst_data["result"]["receive-rate"].
635 elif table["include-tests"] == "PDR":
636 if tst_data["type"] == "PDR":
637 tbl_dict[tst_name_mod]["history"][
639 append(tst_data["throughput"]["value"])
640 elif tst_data["type"] == "NDRPDR":
641 tbl_dict[tst_name_mod]["history"][item[
642 "title"]].append(tst_data["throughput"][
644 elif table["include-tests"] == "NDR":
645 if tst_data["type"] == "NDR":
646 tbl_dict[tst_name_mod]["history"][
648 append(tst_data["throughput"]["value"])
649 elif tst_data["type"] == "NDRPDR":
650 tbl_dict[tst_name_mod]["history"][item[
651 "title"]].append(tst_data["throughput"][
655 except (TypeError, KeyError):
# --- Build rows (Mpps); item[-4]/item[-2] end up as ref/cmp means -------
660 for tst_name in tbl_dict.keys():
661 item = [tbl_dict[tst_name]["name"], ]
663 if tbl_dict[tst_name].get("history", None) is not None:
664 for hist_data in tbl_dict[tst_name]["history"].values():
666 item.append(round(mean(hist_data) / 1000000, 2))
667 item.append(round(stdev(hist_data) / 1000000, 2))
669 item.extend(["Not tested", "Not tested"])
671 item.extend(["Not tested", "Not tested"])
672 data_t = tbl_dict[tst_name]["ref-data"]
674 item.append(round(mean(data_t) / 1000000, 2))
675 item.append(round(stdev(data_t) / 1000000, 2))
677 item.extend(["Not tested", "Not tested"])
678 data_t = tbl_dict[tst_name]["cmp-data"]
680 item.append(round(mean(data_t) / 1000000, 2))
681 item.append(round(stdev(data_t) / 1000000, 2))
683 item.extend(["Not tested", "Not tested"])
684 if item[-2] == "Not tested":
686 elif item[-4] == "Not tested":
687 item.append("New in CSIT-1908")
688 elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
689 item.append("See footnote [1]")
692 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
693 if (len(item) == len(header)) and (item[-3] != "Not tested"):
696 # Sort the table according to the relative change
697 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
699 # Generate csv tables:
700 csv_file = "{0}.csv".format(table["output-file"])
701 with open(csv_file, "w") as file_handler:
702 file_handler.write(header_str)
704 file_handler.write(",".join([str(item) for item in test]) + "\n")
706 txt_file_name = "{0}.txt".format(table["output-file"])
707 convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Append the dot1q methodology footnote referenced above.
710 with open(txt_file_name, 'a') as txt_file:
711 txt_file.writelines([
713 "[1] CSIT-1908 changed test methodology of dot1q tests in "
714 "2n-skx testbeds, dot1q encapsulation is now used on both "
716 " Previously dot1q was used only on a single link with the "
717 "other link carrying untagged Ethernet frames. This change "
719 " in slightly lower throughput in CSIT-1908 for these "
720 "tests. See release notes."
# NOTE(review): sampled listing -- interior lines missing; comments limited to
# visible statements.  Compares the same tests run on two different NICs.
724 def table_nics_comparison(table, input_data):
725 """Generate the table(s) with algorithm: table_nics_comparison
726 specified in the specification file.
728 :param table: Table to generate.
729 :param input_data: Data to process.
730 :type table: pandas.Series
731 :type input_data: InputData
734 logging.info(" Generating the table {0} ...".
735 format(table.get("title", "")))
738 logging.info(" Creating the data set for the {0} '{1}'.".
739 format(table.get("type", ""), table.get("title", "")))
740 data = input_data.filter_data(table, continue_on_error=True)
742 # Prepare the header of the tables
744 header = ["Test case", ]
746 if table["include-tests"] == "MRR":
747 hdr_param = "Rec Rate"
752 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
753 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
754 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
755 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
757 header_str = ",".join(header) + "\n"
758 except (AttributeError, KeyError) as err:
759 logging.error("The model is invalid, missing parameter: {0}".
763 # Prepare data to the table:
765 for job, builds in table["data"].items():
767 for tst_name, tst_data in data[job][str(build)].iteritems():
# Canonicalize the test name and strip the NIC token so both NICs'
# results land in the same table row.
768 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
769 replace("-ndrpdr", "").replace("-pdrdisc", "").\
770 replace("-ndrdisc", "").replace("-pdr", "").\
771 replace("-ndr", "").\
772 replace("1t1c", "1c").replace("2t1c", "1c").\
773 replace("2t2c", "2c").replace("4t2c", "2c").\
774 replace("4t4c", "4c").replace("8t4c", "4c")
775 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
776 if tbl_dict.get(tst_name_mod, None) is None:
# Drop the last name segment (presumably the test-type suffix -- TODO
# confirm) for display purposes.
777 name = "-".join(tst_data["name"].split("-")[:-1])
778 tbl_dict[tst_name_mod] = {"name": name,
# Pick the result according to the configured metric.
782 if table["include-tests"] == "MRR":
783 result = tst_data["result"]["receive-rate"].avg
784 elif table["include-tests"] == "PDR":
785 result = tst_data["throughput"]["PDR"]["LOWER"]
786 elif table["include-tests"] == "NDR":
787 result = tst_data["throughput"]["NDR"]["LOWER"]
# Route the sample to the reference or compare bucket by NIC tag.
792 if table["reference"]["nic"] in tst_data["tags"]:
793 tbl_dict[tst_name_mod]["ref-data"].append(result)
794 elif table["compare"]["nic"] in tst_data["tags"]:
795 tbl_dict[tst_name_mod]["cmp-data"].append(result)
796 except (TypeError, KeyError) as err:
797 logging.debug("No data for {0}".format(tst_name))
798 logging.debug(repr(err))
799 # No data in output.xml for this test
# Build rows: name, ref mean/stdev, cmp mean/stdev, delta [%] (Mpps scale).
802 for tst_name in tbl_dict.keys():
803 item = [tbl_dict[tst_name]["name"], ]
804 data_t = tbl_dict[tst_name]["ref-data"]
806 item.append(round(mean(data_t) / 1000000, 2))
807 item.append(round(stdev(data_t) / 1000000, 2))
809 item.extend([None, None])
810 data_t = tbl_dict[tst_name]["cmp-data"]
812 item.append(round(mean(data_t) / 1000000, 2))
813 item.append(round(stdev(data_t) / 1000000, 2))
815 item.extend([None, None])
# Guard against missing samples and division by a zero reference mean.
816 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
817 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
818 if len(item) == len(header):
821 # Sort the table according to the relative change
822 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
824 # Generate csv tables:
825 csv_file = "{0}.csv".format(table["output-file"])
826 with open(csv_file, "w") as file_handler:
827 file_handler.write(header_str)
829 file_handler.write(",".join([str(item) for item in test]) + "\n")
831 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
# NOTE(review): sampled listing -- interior lines missing; comments limited to
# visible statements.  Compares SOAK test throughput against NDR/PDR/MRR
# results of the corresponding non-soak tests.
834 def table_soak_vs_ndr(table, input_data):
835 """Generate the table(s) with algorithm: table_soak_vs_ndr
836 specified in the specification file.
838 :param table: Table to generate.
839 :param input_data: Data to process.
840 :type table: pandas.Series
841 :type input_data: InputData
844 logging.info(" Generating the table {0} ...".
845 format(table.get("title", "")))
848 logging.info(" Creating the data set for the {0} '{1}'.".
849 format(table.get("type", ""), table.get("title", "")))
850 data = input_data.filter_data(table, continue_on_error=True)
852 # Prepare the header of the table
856 "{0} Thput [Mpps]".format(table["reference"]["title"]),
857 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
858 "{0} Thput [Mpps]".format(table["compare"]["title"]),
859 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
860 "Delta [%]", "Stdev of delta [%]"]
861 header_str = ",".join(header) + "\n"
862 except (AttributeError, KeyError) as err:
863 logging.error("The model is invalid, missing parameter: {0}".
867 # Create a list of available SOAK test results:
869 for job, builds in table["compare"]["data"].items():
871 for tst_name, tst_data in data[job][str(build)].iteritems():
872 if tst_data["type"] == "SOAK":
# Key by the name without the "-soak" suffix so it matches the
# corresponding NDR test name below.
873 tst_name_mod = tst_name.replace("-soak", "")
874 if tbl_dict.get(tst_name_mod, None) is None:
875 groups = re.search(REGEX_NIC, tst_data["parent"])
876 nic = groups.group(0) if groups else ""
877 name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
879 tbl_dict[tst_name_mod] = {
885 tbl_dict[tst_name_mod]["cmp-data"].append(
886 tst_data["throughput"]["LOWER"])
887 except (KeyError, TypeError):
889 tests_lst = tbl_dict.keys()
891 # Add corresponding NDR test results:
# Only tests that already have a SOAK entry are considered.
892 for job, builds in table["reference"]["data"].items():
894 for tst_name, tst_data in data[job][str(build)].iteritems():
895 tst_name_mod = tst_name.replace("-ndrpdr", "").\
897 if tst_name_mod in tests_lst:
899 if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
900 if table["include-tests"] == "MRR":
901 result = tst_data["result"]["receive-rate"].avg
902 elif table["include-tests"] == "PDR":
903 result = tst_data["throughput"]["PDR"]["LOWER"]
904 elif table["include-tests"] == "NDR":
905 result = tst_data["throughput"]["NDR"]["LOWER"]
908 if result is not None:
909 tbl_dict[tst_name_mod]["ref-data"].append(
911 except (KeyError, TypeError):
# Build rows: name, ref mean/stdev, soak mean/stdev, delta and its stdev.
915 for tst_name in tbl_dict.keys():
916 item = [tbl_dict[tst_name]["name"], ]
917 data_r = tbl_dict[tst_name]["ref-data"]
919 data_r_mean = mean(data_r)
920 item.append(round(data_r_mean / 1000000, 2))
921 data_r_stdev = stdev(data_r)
922 item.append(round(data_r_stdev / 1000000, 2))
926 item.extend([None, None])
927 data_c = tbl_dict[tst_name]["cmp-data"]
929 data_c_mean = mean(data_c)
930 item.append(round(data_c_mean / 1000000, 2))
931 data_c_stdev = stdev(data_c)
932 item.append(round(data_c_stdev / 1000000, 2))
936 item.extend([None, None])
# NOTE(review): data_r_mean/data_c_mean look like they could be unbound or
# stale when a branch above skipped assignment -- the guarding lines are
# not visible in this sampled listing; verify in the full source.
937 if data_r_mean and data_c_mean:
938 delta, d_stdev = relative_change_stdev(
939 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
940 item.append(round(delta, 2))
941 item.append(round(d_stdev, 2))
944 # Sort the table according to the relative change
945 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
947 # Generate csv tables:
948 csv_file = "{0}.csv".format(table["output-file"])
949 with open(csv_file, "w") as file_handler:
950 file_handler.write(header_str)
952 file_handler.write(",".join([str(item) for item in test]) + "\n")
954 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
# NOTE(review): sampled listing -- interior lines missing; comments limited to
# visible statements.  Produces the trending dashboard: per-test short/long
# term change plus regression/progression counts from anomaly classification.
957 def table_performance_trending_dashboard(table, input_data):
958 """Generate the table(s) with algorithm:
959 table_performance_trending_dashboard
960 specified in the specification file.
962 :param table: Table to generate.
963 :param input_data: Data to process.
964 :type table: pandas.Series
965 :type input_data: InputData
968 logging.info(" Generating the table {0} ...".
969 format(table.get("title", "")))
972 logging.info(" Creating the data set for the {0} '{1}'.".
973 format(table.get("type", ""), table.get("title", "")))
974 data = input_data.filter_data(table, continue_on_error=True)
976 # Prepare the header of the tables
977 header = ["Test Case",
979 "Short-Term Change [%]",
980 "Long-Term Change [%]",
984 header_str = ",".join(header) + "\n"
986 # Prepare data to the table:
# Collect per-build receive-rate samples for every non-ignored test.
988 for job, builds in table["data"].items():
990 for tst_name, tst_data in data[job][str(build)].iteritems():
991 if tst_name.lower() in table.get("ignore-list", list()):
993 if tbl_dict.get(tst_name, None) is None:
994 groups = re.search(REGEX_NIC, tst_data["parent"])
997 nic = groups.group(0)
998 tbl_dict[tst_name] = {
999 "name": "{0}-{1}".format(nic, tst_data["name"]),
1000 "data": OrderedDict()}
1002 tbl_dict[tst_name]["data"][str(build)] = \
1003 tst_data["result"]["receive-rate"]
1004 except (TypeError, KeyError):
1005 pass # No data in output.xml for this test
1008 for tst_name in tbl_dict.keys():
1009 data_t = tbl_dict[tst_name]["data"]
# classify_anomalies() labels each sample (e.g. "regression"/
# "progression") and returns the trend averages.
1013 classification_lst, avgs = classify_anomalies(data_t)
# Window sizes are clamped to the number of available samples.
1015 win_size = min(len(data_t), table["window"])
1016 long_win_size = min(len(data_t), table["long-trend-window"])
1020 [x for x in avgs[-long_win_size:-win_size]
1025 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Short-term change: last trend average vs the start of the short window;
# NaN when either value is NaN or the denominator is zero.
1027 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1028 rel_change_last = nan
1030 rel_change_last = round(
1031 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
# Long-term change: last trend average vs the long-window maximum.
1033 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1034 rel_change_long = nan
1036 rel_change_long = round(
1037 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1039 if classification_lst:
1040 if isnan(rel_change_last) and isnan(rel_change_long):
1042 if (isnan(last_avg) or
1043 isnan(rel_change_last) or
1044 isnan(rel_change_long)):
# Row: name, last trend [Mpps], changes, then regression/progression
# counts within the short-term window.
1047 [tbl_dict[tst_name]["name"],
1048 round(last_avg / 1000000, 2),
1051 classification_lst[-win_size:].count("regression"),
1052 classification_lst[-win_size:].count("progression")])
1054 tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-key ordering: most regressions first, then most
# progressions, then by short-term change (item[2]).
1057 for nrr in range(table["window"], -1, -1):
1058 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1059 for nrp in range(table["window"], -1, -1):
1060 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1061 tbl_out.sort(key=lambda rel: rel[2])
1062 tbl_sorted.extend(tbl_out)
1064 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1066 logging.info(" Writing file: '{0}'".format(file_name))
1067 with open(file_name, "w") as file_handler:
1068 file_handler.write(header_str)
1069 for test in tbl_sorted:
1070 file_handler.write(",".join([str(item) for item in test]) + '\n')
1072 txt_file_name = "{0}.txt".format(table["output-file"])
1073 logging.info(" Writing file: '{0}'".format(txt_file_name))
1074 convert_csv_to_pretty_txt(file_name, txt_file_name)
1077 def _generate_url(base, testbed, test_name):
1078 """Generate URL to a trending plot from the name of the test case.
1080 :param base: The base part of URL common to all test cases.
1081 :param testbed: The testbed used for testing.
1082 :param test_name: The name of the test case.
1085 :type test_name: str
1086 :returns: The URL to the plot with the trending data for the given test
# Map substrings of the test name onto the trending-page file name and the
# "feature" suffix used in the URL anchor. Branch order matters: earlier,
# more specific matches (e.g. "ipsecbasetnlsw") shadow later generic ones.
1096 if "lbdpdk" in test_name or "lbvpp" in test_name:
1097 file_name = "link_bonding"
1099 elif "114b" in test_name and "vhost" in test_name:
1102 elif "testpmd" in test_name or "l3fwd" in test_name:
1105 elif "memif" in test_name:
1106 file_name = "container_memif"
1109 elif "srv6" in test_name:
1112 elif "vhost" in test_name:
1113 if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
1114 file_name = "vm_vhost_l2"
1115 if "114b" in test_name:
1117 elif "l2xcbase" in test_name and "x520" in test_name:
1118 feature = "-base-l2xc"
1119 elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
1120 feature = "-base-l2bd"
1123 elif "ip4base" in test_name:
1124 file_name = "vm_vhost_ip4"
1127 elif "ipsecbasetnlsw" in test_name:
1128 file_name = "ipsecsw"
1129 feature = "-base-scale"
1131 elif "ipsec" in test_name:
1133 feature = "-base-scale"
1134 if "hw-" in test_name:
1135 file_name = "ipsechw"
1136 elif "sw-" in test_name:
1137 file_name = "ipsecsw"
1139 elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
1140 file_name = "ip4_tunnels"
1143 elif "ip4base" in test_name or "ip4scale" in test_name:
1145 if "xl710" in test_name:
1146 feature = "-base-scale-features"
1147 elif "iacl" in test_name:
1148 feature = "-features-iacl"
1149 elif "oacl" in test_name:
1150 feature = "-features-oacl"
1151 elif "snat" in test_name or "cop" in test_name:
1152 feature = "-features"
1154 feature = "-base-scale"
1156 elif "ip6base" in test_name or "ip6scale" in test_name:
1158 feature = "-base-scale"
1160 elif "l2xcbase" in test_name or "l2xcscale" in test_name \
1161 or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
1162 or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
1164 if "macip" in test_name:
1165 feature = "-features-macip"
1166 elif "iacl" in test_name:
1167 feature = "-features-iacl"
1168 elif "oacl" in test_name:
1169 feature = "-features-oacl"
1171 feature = "-base-scale"
# The NIC model selects the next URL component (the assignments themselves
# are not visible in this excerpt — presumably they set `nic`; TODO confirm).
1173 if "x520" in test_name:
1175 elif "x710" in test_name:
1177 elif "xl710" in test_name:
1179 elif "xxv710" in test_name:
1181 elif "vic1227" in test_name:
1183 elif "vic1385" in test_name:
# Frame size contributes the next component of the anchor.
1189 if "64b" in test_name:
1191 elif "78b" in test_name:
1193 elif "imix" in test_name:
1195 elif "9000b" in test_name:
1197 elif "1518b" in test_name:
1199 elif "114b" in test_name:
1203 anchor += framesize + '-'
# Thread/core combination (e.g. 1t1c, 2t2c) finishes the anchor.
1205 if "1t1c" in test_name:
1207 elif "2t2c" in test_name:
1209 elif "4t4c" in test_name:
1211 elif "2t1c" in test_name:
1213 elif "4t2c" in test_name:
1215 elif "8t4c" in test_name:
# Assemble the final URL. "-int"/"-tnl" are stripped from the feature in the
# page-name portion but the unmodified feature is still appended at the end.
1218 return url + file_name + '-' + testbed + '-' + nic + framesize + \
1219 feature.replace("-int", "").replace("-tnl", "") + anchor + feature
1222 def table_performance_trending_dashboard_html(table, input_data):
1223 """Generate the table(s) with algorithm:
1224 table_performance_trending_dashboard_html specified in the specification
1227 :param table: Table to generate.
1228 :param input_data: Data to process.
1230 :type input_data: InputData
# The testbed name is needed to build links to the trending plots.
1233 testbed = table.get("testbed", None)
1235 logging.error("The testbed is not defined for the table '{0}'.".
1236 format(table.get("title", "")))
1239 logging.info(" Generating the table {0} ...".
1240 format(table.get("title", "")))
# Read the previously generated dashboard CSV into a list of rows.
# NOTE(review): 'rb' mode with csv.reader is a Python 2 idiom; under
# Python 3 this would need mode 'r' with newline="".
1243 with open(table["input-file"], 'rb') as csv_file:
1244 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1245 csv_lst = [item for item in csv_content]
1247 logging.warning("The input file is not defined.")
1249 except csv.Error as err:
1250 logging.warning("Not possible to process the file '{0}'.\n{1}".
1251 format(table["input-file"], err))
# Build the HTML table as an ElementTree; the first CSV row is the header.
1255 dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
1258 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
1259 for idx, item in enumerate(csv_lst[0]):
# First column (test name) is left-aligned, the rest are centered.
1260 alignment = "left" if idx == 0 else "center"
1261 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Row background encodes the classification; the pair of shades per
# classification alternates between even and odd rows.
1265 colors = {"regression": ("#ffcccc", "#ff9999"),
1266 "progression": ("#c6ecc6", "#9fdf9f"),
1267 "normal": ("#e9f1fb", "#d4e4f7")}
1268 for r_idx, row in enumerate(csv_lst[1:]):
1270 color = "regression"
1272 color = "progression"
1275 background = colors[color][r_idx % 2]
1276 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
1279 for c_idx, item in enumerate(row):
1280 alignment = "left" if c_idx == 0 else "center"
1281 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# The test-name cell becomes a hyperlink to its trending plot.
1284 url = _generate_url("../trending/", testbed, item)
1285 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Emit the table wrapped in an rST "raw:: html" directive.
1290 with open(table["output-file"], 'w') as html_file:
1291 logging.info(" Writing file: '{0}'".format(table["output-file"]))
1292 html_file.write(".. raw:: html\n\n\t")
1293 html_file.write(ET.tostring(dashboard))
1294 html_file.write("\n\t<p><br><br></p>\n")
1296 logging.warning("The output file is not defined.")
1300 def table_last_failed_tests(table, input_data):
1301 """Generate the table(s) with algorithm: table_last_failed_tests
1302 specified in the specification file.
1304 :param table: Table to generate.
1305 :param input_data: Data to process.
1306 :type table: pandas.Series
1307 :type input_data: InputData
1310 logging.info(" Generating the table {0} ...".
1311 format(table.get("title", "")))
1313 # Transform the data
1314 logging.info(" Creating the data set for the {0} '{1}'.".
1315 format(table.get("type", ""), table.get("title", "")))
1316 data = input_data.filter_data(table, continue_on_error=True)
# Nothing to report when filtering produced no data.
1318 if data is None or data.empty:
1319 logging.warn(" No data for the {0} '{1}'.".
1320 format(table.get("type", ""), table.get("title", "")))
# For every build: record the build ID, the tested version, then the names of
# all failed tests of that build.
1324 for job, builds in table["data"].items():
1325 for build in builds:
1328 version = input_data.metadata(job, build).get("version", "")
1330 logging.error("Data for {job}: {build} is not present.".
1331 format(job=job, build=build))
1333 tbl_list.append(build)
1334 tbl_list.append(version)
1335 for tst_name, tst_data in data[job][build].iteritems():
# Only failed tests are listed.
1336 if tst_data["status"] != "FAIL":
# Prefix the test name with the NIC model parsed from the parent suite name.
1338 groups = re.search(REGEX_NIC, tst_data["parent"])
1341 nic = groups.group(0)
1342 tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))
# Write one collected item per line to the output file.
1344 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1345 logging.info(" Writing file: '{0}'".format(file_name))
1346 with open(file_name, "w") as file_handler:
1347 for test in tbl_list:
1348 file_handler.write(test + '\n')
1351 def table_failed_tests(table, input_data):
1352 """Generate the table(s) with algorithm: table_failed_tests
1353 specified in the specification file.
1355 :param table: Table to generate.
1356 :param input_data: Data to process.
1357 :type table: pandas.Series
1358 :type input_data: InputData
1361 logging.info(" Generating the table {0} ...".
1362 format(table.get("title", "")))
1364 # Transform the data
1365 logging.info(" Creating the data set for the {0} '{1}'.".
1366 format(table.get("type", ""), table.get("title", "")))
1367 data = input_data.filter_data(table, continue_on_error=True)
1369 # Prepare the header of the tables
1370 header = ["Test Case",
1372 "Last Failure [Time]",
1373 "Last Failure [VPP-Build-Id]",
1374 "Last Failure [CSIT-Job-Build-Id]"]
1376 # Generate the data for the table according to the model in the table
# Only builds generated within the last "window" days (default 7) count.
1380 timeperiod = timedelta(int(table.get("window", 7)))
1383 for job, builds in table["data"].items():
1384 for build in builds:
1386 for tst_name, tst_data in data[job][build].iteritems():
# Tests explicitly listed in "ignore-list" are skipped.
1387 if tst_name.lower() in table.get("ignore-list", list()):
1389 if tbl_dict.get(tst_name, None) is None:
# Prefix the test name with the NIC model parsed from the parent suite name.
1390 groups = re.search(REGEX_NIC, tst_data["parent"])
1393 nic = groups.group(0)
1394 tbl_dict[tst_name] = {
1395 "name": "{0}-{1}".format(nic, tst_data["name"]),
1396 "data": OrderedDict()}
1398 generated = input_data.metadata(job, build).\
1399 get("generated", "")
# Keep only results whose build timestamp falls inside the time window.
1402 then = dt.strptime(generated, "%Y%m%d %H:%M")
1403 if (now - then) <= timeperiod:
1404 tbl_dict[tst_name]["data"][build] = (
1407 input_data.metadata(job, build).get("version", ""),
1409 except (TypeError, KeyError) as err:
1410 logging.warning("tst_name: {} - err: {}".
1411 format(tst_name, repr(err)))
# Count failures per test and remember the most recent failure's metadata
# (date, VPP version, CSIT build) from the stored per-build tuples.
1415 for tst_data in tbl_dict.values():
1417 for val in tst_data["data"].values():
1418 if val[0] == "FAIL":
1420 fails_last_date = val[1]
1421 fails_last_vpp = val[2]
1422 fails_last_csit = val[3]
1424 max_fails = fails_nr if fails_nr > max_fails else max_fails
1425 tbl_lst.append([tst_data["name"],
1429 "mrr-daily-build-{0}".format(fails_last_csit)])
# Sort by last-failure time (newest first), then group by failure count
# descending so the most frequently failing tests come first.
1431 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1433 for nrf in range(max_fails, -1, -1):
1434 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1435 tbl_sorted.extend(tbl_fails)
# Write the CSV output, then a pretty-printed TXT rendering of it.
1436 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1438 logging.info(" Writing file: '{0}'".format(file_name))
1439 with open(file_name, "w") as file_handler:
1440 file_handler.write(",".join(header) + "\n")
1441 for test in tbl_sorted:
1442 file_handler.write(",".join([str(item) for item in test]) + '\n')
1444 txt_file_name = "{0}.txt".format(table["output-file"])
1445 logging.info(" Writing file: '{0}'".format(txt_file_name))
1446 convert_csv_to_pretty_txt(file_name, txt_file_name)
1449 def table_failed_tests_html(table, input_data):
1450 """Generate the table(s) with algorithm: table_failed_tests_html
1451 specified in the specification file.
1453 :param table: Table to generate.
1454 :param input_data: Data to process.
1455 :type table: pandas.Series
1456 :type input_data: InputData
# The testbed name is needed to build links to the trending plots.
1459 testbed = table.get("testbed", None)
1461 logging.error("The testbed is not defined for the table '{0}'.".
1462 format(table.get("title", "")))
1465 logging.info(" Generating the table {0} ...".
1466 format(table.get("title", "")))
# Read the previously generated failed-tests CSV into a list of rows.
# NOTE(review): 'rb' mode with csv.reader is a Python 2 idiom; under
# Python 3 this would need mode 'r' with newline="".
1469 with open(table["input-file"], 'rb') as csv_file:
1470 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1471 csv_lst = [item for item in csv_content]
1473 logging.warning("The input file is not defined.")
1475 except csv.Error as err:
1476 logging.warning("Not possible to process the file '{0}'.\n{1}".
1477 format(table["input-file"], err))
# Build the HTML table as an ElementTree; the first CSV row is the header.
1481 failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
1484 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
1485 for idx, item in enumerate(csv_lst[0]):
# First column (test name) is left-aligned, the rest are centered.
1486 alignment = "left" if idx == 0 else "center"
1487 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Data rows simply alternate between two background shades.
1491 colors = ("#e9f1fb", "#d4e4f7")
1492 for r_idx, row in enumerate(csv_lst[1:]):
1493 background = colors[r_idx % 2]
1494 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
1497 for c_idx, item in enumerate(row):
1498 alignment = "left" if c_idx == 0 else "center"
1499 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# The test-name cell becomes a hyperlink to its trending plot.
1502 url = _generate_url("../trending/", testbed, item)
1503 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Emit the table wrapped in an rST "raw:: html" directive.
1508 with open(table["output-file"], 'w') as html_file:
1509 logging.info(" Writing file: '{0}'".format(table["output-file"]))
1510 html_file.write(".. raw:: html\n\n\t")
1511 html_file.write(ET.tostring(failed_tests))
1512 html_file.write("\n\t<p><br><br></p>\n")
1514 logging.warning("The output file is not defined.")