1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 import plotly.graph_objects as go
23 import plotly.offline as ploff
26 from string import replace
27 from collections import OrderedDict
28 from numpy import nan, isnan
29 from xml.etree import ElementTree as ET
30 from datetime import datetime as dt
31 from datetime import timedelta
33 from utils import mean, stdev, relative_change, classify_anomalies, \
34 convert_csv_to_pretty_txt, relative_change_stdev
# Matches a NIC designation embedded in test/suite names, e.g. "10ge2p1x520":
# optional speed digits + "ge" + port digit + "p" + digit + model letters/digits.
# Used below both to extract the NIC token and to strip it from test names.
37 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
# Entry point: iterate all table specifications and dispatch each one to the
# generator function named in its "algorithm" field.
40 def generate_tables(spec, data):
41 """Generate all tables specified in the specification file.
43 :param spec: Specification read from the specification file.
44 :param data: Data to process.
45 :type spec: Specification
# NOTE(review): the "try:" that opens this handler (orig. line 51) is elided
# in this excerpt. Dispatching with eval() on a spec-supplied string is a
# code-injection risk if the specification file is not trusted; a dict of
# allowed algorithm callables would be safer.
49 logging.info("Generating the tables ...")
50 for table in spec.tables:
52 eval(table["algorithm"])(table, data)
53 except NameError as err:
# An unknown algorithm name is logged and skipped; remaining tables still run.
54 logging.error("Probably algorithm '{alg}' is not defined: {err}".
55 format(alg=table["algorithm"], err=repr(err)))
# Writes one CSV file of detailed test results per suite, for a single
# job/build pair taken from the table specification.
59 def table_details(table, input_data):
60 """Generate the table(s) with algorithm: table_detailed_test_results
61 specified in the specification file.
63 :param table: Table to generate.
64 :param input_data: Data to process.
65 :type table: pandas.Series
66 :type input_data: InputData
69 logging.info("  Generating the table {0} ...".
70 format(table.get("title", "")))
73 logging.info("    Creating the data set for the {0} '{1}'.".
74 format(table.get("type", ""), table.get("title", "")))
75 data = input_data.filter_data(table)
77 # Prepare the header of the tables
# Column titles are CSV-escaped by doubling embedded double quotes.
79 for column in table["columns"]:
80 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
82 # Generate the data for the table according to the model in the table
# Python 2 idiom: dict.keys()[0] — only the first job/build pair is used.
84 job = table["data"].keys()[0]
85 build = str(table["data"][job][0])
87 suites = input_data.suites(job, build)
89 logging.error("  No data available. The table will not be generated.")
# One output file is produced per suite; tests are matched to a suite by
# their "parent" field being a substring of the suite name.
92 for suite_longname, suite in suites.iteritems():
94 suite_name = suite["name"]
96 for test in data[job][build].keys():
97 if data[job][build][test]["parent"] in suite_name:
99 for column in table["columns"]:
# column["data"] is a space-separated spec; token [1] is the field name.
101 col_data = str(data[job][build][test][column["data"].
102 split(" ")[1]]).replace('"', '""')
103 if column["data"].split(" ")[1] in ("conf-history",
# History-type columns are re-wrapped in |prein|/|preout| markers for rst.
105 col_data = replace(col_data, " |br| ", "",
107 col_data = " |prein| {0} |preout| ".\
108 format(col_data[:-5])
109 row_lst.append('"{0}"'.format(col_data))
111 row_lst.append("No data")
112 table_lst.append(row_lst)
114 # Write the data to file
116 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
117 table["output-file-ext"])
118 logging.info("      Writing file: '{}'".format(file_name))
119 with open(file_name, "w") as file_handler:
120 file_handler.write(",".join(header) + "\n")
121 for item in table_lst:
122 file_handler.write(",".join(item) + "\n")
124 logging.info("  Done.")
# Like table_details, but first merges data from all jobs/builds into one
# data set before emitting one CSV file per suite.
127 def table_merged_details(table, input_data):
128 """Generate the table(s) with algorithm: table_merged_details
129 specified in the specification file.
131 :param table: Table to generate.
132 :param input_data: Data to process.
133 :type table: pandas.Series
134 :type input_data: InputData
137 logging.info("  Generating the table {0} ...".
138 format(table.get("title", "")))
141 logging.info("    Creating the data set for the {0} '{1}'.".
142 format(table.get("type", ""), table.get("title", "")))
143 data = input_data.filter_data(table, continue_on_error=True)
144 data = input_data.merge_data(data)
145 data.sort_index(inplace=True)
147 logging.info("    Creating the data set for the {0} '{1}'.".
148 format(table.get("type", ""), table.get("title", "")))
149 suites = input_data.filter_data(
150 table, continue_on_error=True, data_set="suites")
151 suites = input_data.merge_data(suites)
153 # Prepare the header of the tables
155 for column in table["columns"]:
156 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
# One output file per suite; a test belongs to a suite when its "parent"
# is a substring of the suite name.
158 for _, suite in suites.iteritems():
160 suite_name = suite["name"]
162 for test in data.keys():
163 if data[test]["parent"] in suite_name:
165 for column in table["columns"]:
167 col_data = str(data[test][column["data"].
168 split(" ")[1]]).replace('"', '""')
169 col_data = replace(col_data, "No Data",
171 if column["data"].split(" ")[1] in ("conf-history",
173 col_data = replace(col_data, " |br| ", "",
175 col_data = " |prein| {0} |preout| ".\
176 format(col_data[:-5])
177 row_lst.append('"{0}"'.format(col_data))
# Missing fields are reported as "Not captured" (unlike table_details'
# "No data") — presumably intentional wording difference; confirm.
179 row_lst.append('"Not captured"')
180 table_lst.append(row_lst)
182 # Write the data to file
184 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
185 table["output-file-ext"])
186 logging.info("      Writing file: '{}'".format(file_name))
187 with open(file_name, "w") as file_handler:
188 file_handler.write(",".join(header) + "\n")
189 for item in table_lst:
190 file_handler.write(",".join(item) + "\n")
192 logging.info("  Done.")
# Normalizes a test name for cross-release comparison keys: strips the
# test-type suffixes (-ndrpdr, -mrr variants), collapses thread/core tags
# ("8t4c" -> "4c"), and removes the NIC token so different NICs/types of
# the same test map to one key.
195 def _tpc_modify_test_name(test_name):
196 test_name_mod = test_name.replace("-ndrpdrdisc", ""). \
197 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
198 replace("-ndrdisc", "").replace("-pdr", ""). \
199 replace("-ndr", ""). \
200 replace("1t1c", "1c").replace("2t1c", "1c"). \
201 replace("2t2c", "2c").replace("4t2c", "2c"). \
202 replace("4t4c", "4c").replace("8t4c", "4c")
# NOTE(review): the trailing "return test_name_mod" (orig. line 204) is
# elided in this excerpt.
203 test_name_mod = re.sub(REGEX_NIC, "", test_name_mod)
207 def _tpc_modify_displayed_test_name(test_name):
208 return test_name.replace("1t1c", "1c").replace("2t1c", "1c"). \
209 replace("2t2c", "2c").replace("4t2c", "2c"). \
210 replace("4t4c", "4c").replace("8t4c", "4c")
# Appends one measured value from a test-result record to `target`, picking
# the field by test kind: MRR -> average receive rate, PDR/NDR -> lower
# throughput bound. Missing/odd records are silently skipped.
213 def _tpc_insert_data(target, src, include_tests):
# NOTE(review): the "try:" opening this handler (orig. line 214) and the
# "pass" body of the except clause (orig. line 222) are elided here.
215 if include_tests == "MRR":
216 target.append(src["result"]["receive-rate"].avg)
217 elif include_tests == "PDR":
218 target.append(src["throughput"]["PDR"]["LOWER"])
219 elif include_tests == "NDR":
220 target.append(src["throughput"]["NDR"]["LOWER"])
221 except (KeyError, TypeError):
# Partitions comparison-table rows into three groups by their last column
# ("New in CSIT..." rows, "See footnote..." rows, numeric-delta rows),
# sorts each group, and concatenates them: new first, footnotes second,
# then deltas in descending order.
225 def _tpc_sort_table(table):
227 # 1. New in CSIT-XXXX
# NOTE(review): the initializations of tbl_new/tbl_see/tbl_delta and the
# append calls for the first two groups (orig. lines 228-239) are elided
# in this excerpt.
234 if isinstance(item[-1], str):
235 if "New in CSIT" in item[-1]:
237 elif "See footnote" in item[-1]:
240 tbl_delta.append(item)
# Groups are ordered: new/footnote rows by test name (column 0), footnote
# rows additionally by the footnote text, delta rows by delta descending.
243 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
244 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
245 tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
246 tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
248 # Put the tables together:
# NOTE(review): `table` appears to be re-bound to a fresh list before these
# extends (elided line); the function presumably returns it — confirm.
250 table.extend(tbl_new)
251 table.extend(tbl_see)
252 table.extend(tbl_delta)
# Renders the comparison table as a standalone HTML file using a plotly
# go.Table with an Updatemenu dropdown that toggles between pre-computed
# ascending/descending sorts of each column.
257 def _tpc_generate_html_table(header, data, output_file_name):
258 """Generate html table from input data with simple sorting possibility.
260 :param header: Table header.
261 :param data: Input data to be included in the table. It is a list of lists.
262 Inner lists are rows in the table. All inner lists must be of the same
263 length. The length of these lists must be the same as the length of the
265 :param output_file_name: The name (relative or full path) where the
266 generated html table is written.
268 :type data: list of lists
269 :type output_file_name: str
272 df = pd.DataFrame(data, columns=header)
# Pre-sort the frame once per column and direction; the first (test name)
# column is the tie-breaker, with the direction flipped for the name column
# itself.
274 df_sorted = [df.sort_values(
275 by=[key, header[0]], ascending=[True, True]
276 if key != header[0] else [False, True]) for key in header]
277 df_sorted_rev = [df.sort_values(
278 by=[key, header[0]], ascending=[False, True]
279 if key != header[0] else [True, True]) for key in header]
280 df_sorted.extend(df_sorted_rev)
# Alternating row colors (zebra striping).
282 fill_color = [["#d4e4f7" if idx % 2 else "#e9f1fb"
283 for idx in range(len(df))]]
285 values=["<b>{item}</b>".format(item=item) for item in header],
286 fill_color="#7eade7",
287 align=["left", "center"]
# One go.Table trace per pre-sorted frame; the dropdown below switches
# visibility between them instead of re-sorting client-side.
292 for table in df_sorted:
293 columns = [table.get(col) for col in header]
296 columnwidth=[30, 10],
300 fill_color=fill_color,
301 align=["left", "right"]
307 menu_items = ["<b>{0}</b> (ascending)".format(itm) for itm in header]
308 menu_items_rev = ["<b>{0}</b> (descending)".format(itm) for itm in header]
309 menu_items.extend(menu_items_rev)
310 for idx, hdr in enumerate(menu_items):
# Each button shows exactly one trace (the matching sorted copy).
311 visible = [False, ] * len(menu_items)
315 label=hdr.replace(" [Mpps]", ""),
317 args=[{"visible": visible}],
323 go.layout.Updatemenu(
330 active=len(menu_items) - 1,
331 buttons=list(buttons)
335 go.layout.Annotation(
336 text="<b>Sort by:</b>",
347 ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
# Builds the release-to-release performance comparison table: collects
# reference and compare (and optional historical) measurements per test,
# computes mean/stdev in Mpps and the relative delta, then writes CSV, TXT
# and sortable HTML outputs.
350 def table_performance_comparison(table, input_data):
351 """Generate the table(s) with algorithm: table_performance_comparison
352 specified in the specification file.
354 :param table: Table to generate.
355 :param input_data: Data to process.
356 :type table: pandas.Series
357 :type input_data: InputData
360 logging.info("  Generating the table {0} ...".
361 format(table.get("title", "")))
364 logging.info("    Creating the data set for the {0} '{1}'.".
365 format(table.get("type", ""), table.get("title", "")))
366 data = input_data.filter_data(table, continue_on_error=True)
368 # Prepare the header of the tables
370 header = ["Test case", ]
# Header metric label depends on the test kind (MRR vs NDR/PDR throughput).
372 if table["include-tests"] == "MRR":
373 hdr_param = "Rec Rate"
# Optional "history" entries add one mean+stdev column pair per release.
377 history = table.get("history", None)
381 ["{0} {1} [Mpps]".format(item["title"], hdr_param),
382 "{0} Stdev [Mpps]".format(item["title"])])
384 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
385 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
386 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
387 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
389 header_str = ",".join(header) + "\n"
390 except (AttributeError, KeyError) as err:
391 logging.error("The model is invalid, missing parameter: {0}".
395 # Prepare data to the table:
# Reference data: one tbl_dict entry per normalized test name.
398 for job, builds in table["reference"]["data"].items():
# Footnote [1] below only applies to 2n-skx topologies.
399 topo = "2n-skx" if "2n-skx" in job else ""
401 for tst_name, tst_data in data[job][str(build)].iteritems():
402 tst_name_mod = _tpc_modify_test_name(tst_name)
403 if "across topologies" in table["title"].lower():
404 tst_name_mod = tst_name_mod.replace("2n1l-", "")
405 if tbl_dict.get(tst_name_mod, None) is None:
406 groups = re.search(REGEX_NIC, tst_data["parent"])
407 nic = groups.group(0) if groups else ""
408 name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
410 if "across testbeds" in table["title"].lower() or \
411 "across topologies" in table["title"].lower():
412 name = _tpc_modify_displayed_test_name(name)
413 tbl_dict[tst_name_mod] = {"name": name,
416 _tpc_insert_data(target=tbl_dict[tst_name_mod]["ref-data"],
418 include_tests=table["include-tests"])
# Compare data: same keying, filling "cmp-data".
420 for job, builds in table["compare"]["data"].items():
422 for tst_name, tst_data in data[job][str(build)].iteritems():
423 tst_name_mod = _tpc_modify_test_name(tst_name)
424 if "across topologies" in table["title"].lower():
425 tst_name_mod = tst_name_mod.replace("2n1l-", "")
426 if tbl_dict.get(tst_name_mod, None) is None:
427 groups = re.search(REGEX_NIC, tst_data["parent"])
428 nic = groups.group(0) if groups else ""
429 name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
431 if "across testbeds" in table["title"].lower() or \
432 "across topologies" in table["title"].lower():
433 name = _tpc_modify_displayed_test_name(name)
434 tbl_dict[tst_name_mod] = {"name": name,
437 _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
439 include_tests=table["include-tests"])
# Optional replacement data set overrides compare results per test: the
# first replacement hit for a test clears its previously-collected
# cmp-data (create_new_list flag), then appends replacement values.
441 replacement = table["compare"].get("data-replacement", None)
443 create_new_list = True
444 rpl_data = input_data.filter_data(
445 table, data=replacement, continue_on_error=True)
446 for job, builds in replacement.items():
448 for tst_name, tst_data in rpl_data[job][str(build)].iteritems():
449 tst_name_mod = _tpc_modify_test_name(tst_name)
450 if "across topologies" in table["title"].lower():
451 tst_name_mod = tst_name_mod.replace("2n1l-", "")
452 if tbl_dict.get(tst_name_mod, None) is None:
453 name = "{0}".format("-".join(tst_data["name"].
455 if "across testbeds" in table["title"].lower() or \
456 "across topologies" in table["title"].lower():
457 name = _tpc_modify_displayed_test_name(name)
458 tbl_dict[tst_name_mod] = {"name": name,
462 create_new_list = False
463 tbl_dict[tst_name_mod]["cmp-data"] = list()
465 _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
467 include_tests=table["include-tests"])
# Historical releases: append per-release sample lists keyed by title.
471 for job, builds in item["data"].items():
473 for tst_name, tst_data in data[job][str(build)].iteritems():
474 tst_name_mod = _tpc_modify_test_name(tst_name)
475 if "across topologies" in table["title"].lower():
476 tst_name_mod = tst_name_mod.replace("2n1l-", "")
477 if tbl_dict.get(tst_name_mod, None) is None:
479 if tbl_dict[tst_name_mod].get("history", None) is None:
480 tbl_dict[tst_name_mod]["history"] = OrderedDict()
481 if tbl_dict[tst_name_mod]["history"].\
482 get(item["title"], None) is None:
483 tbl_dict[tst_name_mod]["history"][item["title"]] = \
486 # TODO: Re-work when NDRPDRDISC tests are not used
487 if table["include-tests"] == "MRR":
488 tbl_dict[tst_name_mod]["history"][item[
489 "title"]].append(tst_data["result"][
491 elif table["include-tests"] == "PDR":
492 if tst_data["type"] == "PDR":
493 tbl_dict[tst_name_mod]["history"][
495 append(tst_data["throughput"]["value"])
496 elif tst_data["type"] == "NDRPDR":
497 tbl_dict[tst_name_mod]["history"][item[
498 "title"]].append(tst_data["throughput"][
500 elif table["include-tests"] == "NDR":
501 if tst_data["type"] == "NDR":
502 tbl_dict[tst_name_mod]["history"][
504 append(tst_data["throughput"]["value"])
505 elif tst_data["type"] == "NDRPDR":
506 tbl_dict[tst_name_mod]["history"][item[
507 "title"]].append(tst_data["throughput"][
511 except (TypeError, KeyError):
# Assemble output rows: values are converted to Mpps and rounded; missing
# samples become "Not tested".
516 for tst_name in tbl_dict.keys():
517 item = [tbl_dict[tst_name]["name"], ]
519 if tbl_dict[tst_name].get("history", None) is not None:
520 for hist_data in tbl_dict[tst_name]["history"].values():
522 item.append(round(mean(hist_data) / 1000000, 2))
523 item.append(round(stdev(hist_data) / 1000000, 2))
525 item.extend(["Not tested", "Not tested"])
527 item.extend(["Not tested", "Not tested"])
528 data_t = tbl_dict[tst_name]["ref-data"]
530 item.append(round(mean(data_t) / 1000000, 2))
531 item.append(round(stdev(data_t) / 1000000, 2))
533 item.extend(["Not tested", "Not tested"])
534 data_t = tbl_dict[tst_name]["cmp-data"]
536 item.append(round(mean(data_t) / 1000000, 2))
537 item.append(round(stdev(data_t) / 1000000, 2))
539 item.extend(["Not tested", "Not tested"])
# Last column: textual status or integer relative change [%].
540 if item[-2] == "Not tested":
542 elif item[-4] == "Not tested":
543 item.append("New in CSIT-1908")
544 elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
545 item.append("See footnote [1]")
548 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
549 if (len(item) == len(header)) and (item[-3] != "Not tested"):
552 tbl_lst = _tpc_sort_table(tbl_lst)
554 # Generate csv tables:
555 csv_file = "{0}.csv".format(table["output-file"])
556 with open(csv_file, "w") as file_handler:
557 file_handler.write(header_str)
559 file_handler.write(",".join([str(item) for item in test]) + "\n")
561 txt_file_name = "{0}.txt".format(table["output-file"])
562 convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Footnote [1] is appended to the pretty TXT output only.
565 with open(txt_file_name, 'a') as txt_file:
566 txt_file.writelines([
568 "[1] CSIT-1908 changed test methodology of dot1q tests in "
569 "2-node testbeds, dot1q encapsulation is now used on both "
571 " Previously dot1q was used only on a single link with the "
572 "other link carrying untagged Ethernet frames. This changes "
574 " in slightly lower throughput in CSIT-1908 for these "
575 "tests. See release notes."
578 # Generate html table:
579 _tpc_generate_html_table(header, tbl_lst,
580 "{0}.html".format(table["output-file"]))
# Same as table_performance_comparison, but restricted to one NIC: every
# collection loop skips tests whose tags do not include the NIC named in
# the table spec ("reference"/"compare"/history "nic" keys), and the NIC
# token is omitted from displayed names. NOTE(review): near-duplicate of
# table_performance_comparison — candidate for refactoring into one
# parameterized function.
583 def table_performance_comparison_nic(table, input_data):
584 """Generate the table(s) with algorithm: table_performance_comparison
585 specified in the specification file.
587 :param table: Table to generate.
588 :param input_data: Data to process.
589 :type table: pandas.Series
590 :type input_data: InputData
593 logging.info("  Generating the table {0} ...".
594 format(table.get("title", "")))
597 logging.info("    Creating the data set for the {0} '{1}'.".
598 format(table.get("type", ""), table.get("title", "")))
599 data = input_data.filter_data(table, continue_on_error=True)
601 # Prepare the header of the tables
603 header = ["Test case", ]
605 if table["include-tests"] == "MRR":
606 hdr_param = "Rec Rate"
610 history = table.get("history", None)
614 ["{0} {1} [Mpps]".format(item["title"], hdr_param),
615 "{0} Stdev [Mpps]".format(item["title"])])
617 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
618 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
619 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
620 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
622 header_str = ",".join(header) + "\n"
623 except (AttributeError, KeyError) as err:
624 logging.error("The model is invalid, missing parameter: {0}".
628 # Prepare data to the table:
631 for job, builds in table["reference"]["data"].items():
632 topo = "2n-skx" if "2n-skx" in job else ""
634 for tst_name, tst_data in data[job][str(build)].iteritems():
# NIC filter: only tests tagged with the reference NIC are collected.
635 if table["reference"]["nic"] not in tst_data["tags"]:
637 tst_name_mod = _tpc_modify_test_name(tst_name)
638 if "across topologies" in table["title"].lower():
639 tst_name_mod = tst_name_mod.replace("2n1l-", "")
640 if tbl_dict.get(tst_name_mod, None) is None:
641 name = "{0}".format("-".join(tst_data["name"].
643 if "across testbeds" in table["title"].lower() or \
644 "across topologies" in table["title"].lower():
645 name = _tpc_modify_displayed_test_name(name)
646 tbl_dict[tst_name_mod] = {"name": name,
649 _tpc_insert_data(target=tbl_dict[tst_name_mod]["ref-data"],
651 include_tests=table["include-tests"])
653 for job, builds in table["compare"]["data"].items():
655 for tst_name, tst_data in data[job][str(build)].iteritems():
656 if table["compare"]["nic"] not in tst_data["tags"]:
658 tst_name_mod = _tpc_modify_test_name(tst_name)
659 if "across topologies" in table["title"].lower():
660 tst_name_mod = tst_name_mod.replace("2n1l-", "")
661 if tbl_dict.get(tst_name_mod, None) is None:
662 name = "{0}".format("-".join(tst_data["name"].
664 if "across testbeds" in table["title"].lower() or \
665 "across topologies" in table["title"].lower():
666 name = _tpc_modify_displayed_test_name(name)
667 tbl_dict[tst_name_mod] = {"name": name,
670 _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
672 include_tests=table["include-tests"])
# Optional replacement data overrides cmp-data (first hit clears the list).
674 replacement = table["compare"].get("data-replacement", None)
676 create_new_list = True
677 rpl_data = input_data.filter_data(
678 table, data=replacement, continue_on_error=True)
679 for job, builds in replacement.items():
681 for tst_name, tst_data in rpl_data[job][str(build)].iteritems():
682 if table["compare"]["nic"] not in tst_data["tags"]:
684 tst_name_mod = _tpc_modify_test_name(tst_name)
685 if "across topologies" in table["title"].lower():
686 tst_name_mod = tst_name_mod.replace("2n1l-", "")
687 if tbl_dict.get(tst_name_mod, None) is None:
688 name = "{0}".format("-".join(tst_data["name"].
690 if "across testbeds" in table["title"].lower() or \
691 "across topologies" in table["title"].lower():
692 name = _tpc_modify_displayed_test_name(name)
693 tbl_dict[tst_name_mod] = {"name": name,
697 create_new_list = False
698 tbl_dict[tst_name_mod]["cmp-data"] = list()
700 _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
702 include_tests=table["include-tests"])
# Historical releases, also NIC-filtered per history item.
706 for job, builds in item["data"].items():
708 for tst_name, tst_data in data[job][str(build)].iteritems():
709 if item["nic"] not in tst_data["tags"]:
711 tst_name_mod = _tpc_modify_test_name(tst_name)
712 if "across topologies" in table["title"].lower():
713 tst_name_mod = tst_name_mod.replace("2n1l-", "")
714 if tbl_dict.get(tst_name_mod, None) is None:
716 if tbl_dict[tst_name_mod].get("history", None) is None:
717 tbl_dict[tst_name_mod]["history"] = OrderedDict()
718 if tbl_dict[tst_name_mod]["history"].\
719 get(item["title"], None) is None:
720 tbl_dict[tst_name_mod]["history"][item["title"]] = \
723 # TODO: Re-work when NDRPDRDISC tests are not used
724 if table["include-tests"] == "MRR":
725 tbl_dict[tst_name_mod]["history"][item[
726 "title"]].append(tst_data["result"][
728 elif table["include-tests"] == "PDR":
729 if tst_data["type"] == "PDR":
730 tbl_dict[tst_name_mod]["history"][
732 append(tst_data["throughput"]["value"])
733 elif tst_data["type"] == "NDRPDR":
734 tbl_dict[tst_name_mod]["history"][item[
735 "title"]].append(tst_data["throughput"][
737 elif table["include-tests"] == "NDR":
738 if tst_data["type"] == "NDR":
739 tbl_dict[tst_name_mod]["history"][
741 append(tst_data["throughput"]["value"])
742 elif tst_data["type"] == "NDRPDR":
743 tbl_dict[tst_name_mod]["history"][item[
744 "title"]].append(tst_data["throughput"][
748 except (TypeError, KeyError):
# Assemble rows in Mpps; missing samples become "Not tested".
753 for tst_name in tbl_dict.keys():
754 item = [tbl_dict[tst_name]["name"], ]
756 if tbl_dict[tst_name].get("history", None) is not None:
757 for hist_data in tbl_dict[tst_name]["history"].values():
759 item.append(round(mean(hist_data) / 1000000, 2))
760 item.append(round(stdev(hist_data) / 1000000, 2))
762 item.extend(["Not tested", "Not tested"])
764 item.extend(["Not tested", "Not tested"])
765 data_t = tbl_dict[tst_name]["ref-data"]
767 item.append(round(mean(data_t) / 1000000, 2))
768 item.append(round(stdev(data_t) / 1000000, 2))
770 item.extend(["Not tested", "Not tested"])
771 data_t = tbl_dict[tst_name]["cmp-data"]
773 item.append(round(mean(data_t) / 1000000, 2))
774 item.append(round(stdev(data_t) / 1000000, 2))
776 item.extend(["Not tested", "Not tested"])
777 if item[-2] == "Not tested":
779 elif item[-4] == "Not tested":
780 item.append("New in CSIT-1908")
781 elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
782 item.append("See footnote [1]")
785 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
786 if (len(item) == len(header)) and (item[-3] != "Not tested"):
789 tbl_lst = _tpc_sort_table(tbl_lst)
791 # Generate csv tables:
792 csv_file = "{0}.csv".format(table["output-file"])
793 with open(csv_file, "w") as file_handler:
794 file_handler.write(header_str)
796 file_handler.write(",".join([str(item) for item in test]) + "\n")
798 txt_file_name = "{0}.txt".format(table["output-file"])
799 convert_csv_to_pretty_txt(csv_file, txt_file_name)
802 with open(txt_file_name, 'a') as txt_file:
803 txt_file.writelines([
805 "[1] CSIT-1908 changed test methodology of dot1q tests in "
806 "2-node testbeds, dot1q encapsulation is now used on both "
808 " Previously dot1q was used only on a single link with the "
809 "other link carrying untagged Ethernet frames. This changes "
811 " in slightly lower throughput in CSIT-1908 for these "
812 "tests. See release notes."
815 # Generate html table:
816 _tpc_generate_html_table(header, tbl_lst,
817 "{0}.html".format(table["output-file"]))
# Compares the same tests run on two different NICs: groups results by
# NIC-stripped test name, splits samples into ref/cmp by NIC tag, and
# emits CSV/TXT/HTML with means, stdevs and relative change.
820 def table_nics_comparison(table, input_data):
821 """Generate the table(s) with algorithm: table_nics_comparison
822 specified in the specification file.
824 :param table: Table to generate.
825 :param input_data: Data to process.
826 :type table: pandas.Series
827 :type input_data: InputData
830 logging.info("  Generating the table {0} ...".
831 format(table.get("title", "")))
834 logging.info("    Creating the data set for the {0} '{1}'.".
835 format(table.get("type", ""), table.get("title", "")))
836 data = input_data.filter_data(table, continue_on_error=True)
838 # Prepare the header of the tables
840 header = ["Test case", ]
842 if table["include-tests"] == "MRR":
843 hdr_param = "Rec Rate"
848 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
849 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
850 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
851 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
853 header_str = ",".join(header) + "\n"
854 except (AttributeError, KeyError) as err:
855 logging.error("The model is invalid, missing parameter: {0}".
859 # Prepare data to the table:
861 for job, builds in table["data"].items():
863 for tst_name, tst_data in data[job][str(build)].iteritems():
# NOTE(review): inline duplicate of _tpc_modify_test_name — could call the
# helper instead of repeating the replace chain.
864 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
865 replace("-ndrpdr", "").replace("-pdrdisc", "").\
866 replace("-ndrdisc", "").replace("-pdr", "").\
867 replace("-ndr", "").\
868 replace("1t1c", "1c").replace("2t1c", "1c").\
869 replace("2t2c", "2c").replace("4t2c", "2c").\
870 replace("4t4c", "4c").replace("8t4c", "4c")
871 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
872 if tbl_dict.get(tst_name_mod, None) is None:
873 name = "-".join(tst_data["name"].split("-")[:-1])
874 tbl_dict[tst_name_mod] = {"name": name,
878 if table["include-tests"] == "MRR":
879 result = tst_data["result"]["receive-rate"].avg
880 elif table["include-tests"] == "PDR":
881 result = tst_data["throughput"]["PDR"]["LOWER"]
882 elif table["include-tests"] == "NDR":
883 result = tst_data["throughput"]["NDR"]["LOWER"]
# A test's NIC tag decides which side of the comparison its result joins.
888 if table["reference"]["nic"] in tst_data["tags"]:
889 tbl_dict[tst_name_mod]["ref-data"].append(result)
890 elif table["compare"]["nic"] in tst_data["tags"]:
891 tbl_dict[tst_name_mod]["cmp-data"].append(result)
892 except (TypeError, KeyError) as err:
893 logging.debug("No data for {0}".format(tst_name))
894 logging.debug(repr(err))
895 # No data in output.xml for this test
# Assemble rows in Mpps; missing sides become None (not "Not tested").
898 for tst_name in tbl_dict.keys():
899 item = [tbl_dict[tst_name]["name"], ]
900 data_t = tbl_dict[tst_name]["ref-data"]
902 item.append(round(mean(data_t) / 1000000, 2))
903 item.append(round(stdev(data_t) / 1000000, 2))
905 item.extend([None, None])
906 data_t = tbl_dict[tst_name]["cmp-data"]
908 item.append(round(mean(data_t) / 1000000, 2))
909 item.append(round(stdev(data_t) / 1000000, 2))
911 item.extend([None, None])
# Delta is only computed when both means exist and ref is non-zero.
912 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
913 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
914 if len(item) == len(header):
917 # Sort the table according to the relative change
918 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
920 # Generate csv tables:
921 csv_file = "{0}.csv".format(table["output-file"])
922 with open(csv_file, "w") as file_handler:
923 file_handler.write(header_str)
925 file_handler.write(",".join([str(item) for item in test]) + "\n")
927 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
929 # Generate html table:
930 _tpc_generate_html_table(header, tbl_lst,
931 "{0}.html".format(table["output-file"]))
# Compares SOAK test throughput against the corresponding NDR/PDR/MRR
# results: SOAK tests seed the table (cmp side), matching non-soak tests
# fill the ref side, delta and its stdev are computed per test.
934 def table_soak_vs_ndr(table, input_data):
935 """Generate the table(s) with algorithm: table_soak_vs_ndr
936 specified in the specification file.
938 :param table: Table to generate.
939 :param input_data: Data to process.
940 :type table: pandas.Series
941 :type input_data: InputData
944 logging.info("  Generating the table {0} ...".
945 format(table.get("title", "")))
948 logging.info("    Creating the data set for the {0} '{1}'.".
949 format(table.get("type", ""), table.get("title", "")))
950 data = input_data.filter_data(table, continue_on_error=True)
952 # Prepare the header of the table
956 "{0} Thput [Mpps]".format(table["reference"]["title"]),
957 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
958 "{0} Thput [Mpps]".format(table["compare"]["title"]),
959 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
960 "Delta [%]", "Stdev of delta [%]"]
961 header_str = ",".join(header) + "\n"
962 except (AttributeError, KeyError) as err:
963 logging.error("The model is invalid, missing parameter: {0}".
967 # Create a list of available SOAK test results:
969 for job, builds in table["compare"]["data"].items():
971 for tst_name, tst_data in data[job][str(build)].iteritems():
972 if tst_data["type"] == "SOAK":
# Key on the name with "-soak" removed so NDR counterparts match below.
973 tst_name_mod = tst_name.replace("-soak", "")
974 if tbl_dict.get(tst_name_mod, None) is None:
975 groups = re.search(REGEX_NIC, tst_data["parent"])
976 nic = groups.group(0) if groups else ""
977 name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
979 tbl_dict[tst_name_mod] = {
985 tbl_dict[tst_name_mod]["cmp-data"].append(
986 tst_data["throughput"]["LOWER"])
987 except (KeyError, TypeError):
989 tests_lst = tbl_dict.keys()
991 # Add corresponding NDR test results:
992 for job, builds in table["reference"]["data"].items():
994 for tst_name, tst_data in data[job][str(build)].iteritems():
995 tst_name_mod = tst_name.replace("-ndrpdr", "").\
# Only tests that already have a SOAK entry are considered.
997 if tst_name_mod in tests_lst:
999 if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
1000 if table["include-tests"] == "MRR":
1001 result = tst_data["result"]["receive-rate"].avg
1002 elif table["include-tests"] == "PDR":
1003 result = tst_data["throughput"]["PDR"]["LOWER"]
1004 elif table["include-tests"] == "NDR":
1005 result = tst_data["throughput"]["NDR"]["LOWER"]
1008 if result is not None:
1009 tbl_dict[tst_name_mod]["ref-data"].append(
1011 except (KeyError, TypeError):
# Assemble rows: Mpps means/stdevs plus delta and stdev-of-delta.
1015 for tst_name in tbl_dict.keys():
1016 item = [tbl_dict[tst_name]["name"], ]
1017 data_r = tbl_dict[tst_name]["ref-data"]
1019 data_r_mean = mean(data_r)
1020 item.append(round(data_r_mean / 1000000, 2))
1021 data_r_stdev = stdev(data_r)
1022 item.append(round(data_r_stdev / 1000000, 2))
1026 item.extend([None, None])
1027 data_c = tbl_dict[tst_name]["cmp-data"]
1029 data_c_mean = mean(data_c)
1030 item.append(round(data_c_mean / 1000000, 2))
1031 data_c_stdev = stdev(data_c)
1032 item.append(round(data_c_stdev / 1000000, 2))
1036 item.extend([None, None])
1037 if data_r_mean and data_c_mean:
1038 delta, d_stdev = relative_change_stdev(
1039 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1040 item.append(round(delta, 2))
1041 item.append(round(d_stdev, 2))
1042 tbl_lst.append(item)
1044 # Sort the table according to the relative change
1045 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1047 # Generate csv tables:
1048 csv_file = "{0}.csv".format(table["output-file"])
1049 with open(csv_file, "w") as file_handler:
1050 file_handler.write(header_str)
1051 for test in tbl_lst:
1052 file_handler.write(",".join([str(item) for item in test]) + "\n")
1054 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
1056 # Generate html table:
1057 _tpc_generate_html_table(header, tbl_lst,
1058 "{0}.html".format(table["output-file"]))
# NOTE(review): this block carries original-file line numbers inline and the
# numbering shows several source lines are absent from this excerpt (e.g.
# the `continue` for ignored tests, the `try`, and the `tbl_lst`/`tbl_sorted`
# initialisations). Comments below describe only the visible logic.
1061 def table_performance_trending_dashboard(table, input_data):
1062 """Generate the table(s) with algorithm:
1063 table_performance_trending_dashboard
1064 specified in the specification file.
1066 :param table: Table to generate.
1067 :param input_data: Data to process.
1068 :type table: pandas.Series
1069 :type input_data: InputData
1072 logging.info("  Generating the table {0} ...".
1073 format(table.get("title", "")))
# Filter the global input data down to what this table's spec selects;
# continue_on_error keeps generation going when a build lacks data.
1075 # Transform the data
1076 logging.info("    Creating the data set for the {0} '{1}'.".
1077 format(table.get("type", ""), table.get("title", "")))
1078 data = input_data.filter_data(table, continue_on_error=True)
# CSV header for the dashboard (more columns exist in the missing lines
# 1082 and 1085-1087 — TODO confirm against the full source).
1080 # Prepare the header of the tables
1081 header = ["Test Case",
1083 "Short-Term Change [%]",
1084 "Long-Term Change [%]",
1088 header_str = ",".join(header) + "\n"
# Build tbl_dict keyed by test name; each entry holds a display name
# ("<nic>-<test name>", NIC taken from the parent suite via REGEX_NIC)
# and an OrderedDict mapping build id -> receive-rate result.
# NOTE(review): .iteritems() implies this module targets Python 2.
1090 # Prepare data to the table:
1092 for job, builds in table["data"].items():
1093 for build in builds:
1094 for tst_name, tst_data in data[job][str(build)].iteritems():
1095 if tst_name.lower() in table.get("ignore-list", list()):
1097 if tbl_dict.get(tst_name, None) is None:
1098 groups = re.search(REGEX_NIC, tst_data["parent"])
1101 nic = groups.group(0)
1102 tbl_dict[tst_name] = {
1103 "name": "{0}-{1}".format(nic, tst_data["name"]),
1104 "data": OrderedDict()}
1106 tbl_dict[tst_name]["data"][str(build)] = \
1107 tst_data["result"]["receive-rate"]
1108 except (TypeError, KeyError):
1109 pass # No data in output.xml for this test
# For each test: classify the per-build series into anomalies and trend
# averages, then derive short-term (vs. one window back) and long-term
# (vs. the long-window maximum, per the visible comprehension fragment)
# relative changes in percent.
1112 for tst_name in tbl_dict.keys():
1113 data_t = tbl_dict[tst_name]["data"]
1117 classification_lst, avgs = classify_anomalies(data_t)
1119 win_size = min(len(data_t), table["window"])
1120 long_win_size = min(len(data_t), table["long-trend-window"])
1124 [x for x in avgs[-long_win_size:-win_size]
# Trend average one short window back, clamped so short series do not
# index out of range.
1129 avg_week_ago = avgs[max(-win_size, -len(avgs))]
1131 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1132 rel_change_last = nan
1134 rel_change_last = round(
1135 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1137 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1138 rel_change_long = nan
1140 rel_change_long = round(
1141 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
# Row layout (per the visible fragments): name, last trend value scaled
# to Mpps, short/long-term change, regression count, progression count
# within the short window.
1143 if classification_lst:
1144 if isnan(rel_change_last) and isnan(rel_change_long):
1146 if isnan(last_avg) or isnan(rel_change_last) or \
1147 isnan(rel_change_long):
1150 [tbl_dict[tst_name]["name"],
1151 round(last_avg / 1000000, 2),
1154 classification_lst[-win_size:].count("regression"),
1155 classification_lst[-win_size:].count("progression")])
# Sort alphabetically first, then bucket rows by regression count
# (descending) and, inside each bucket, by progression count
# (descending); ties are ordered by column 2.
1157 tbl_lst.sort(key=lambda rel: rel[0])
1160 for nrr in range(table["window"], -1, -1):
1161 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1162 for nrp in range(table["window"], -1, -1):
1163 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1164 tbl_out.sort(key=lambda rel: rel[2])
1165 tbl_sorted.extend(tbl_out)
# Emit the CSV file, then a pretty-printed .txt rendering of it.
1167 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1169 logging.info("    Writing file: '{0}'".format(file_name))
1170 with open(file_name, "w") as file_handler:
1171 file_handler.write(header_str)
1172 for test in tbl_sorted:
1173 file_handler.write(",".join([str(item) for item in test]) + '\n')
1175 txt_file_name = "{0}.txt".format(table["output-file"])
1176 logging.info("    Writing file: '{0}'".format(txt_file_name))
1177 convert_csv_to_pretty_txt(file_name, txt_file_name)
# NOTE(review): large runs of this function are missing from the excerpt
# (all assignments to `url`, `nic`, `framesize` and `anchor`, and several
# `file_name`/`feature` branches) — only the visible substring-dispatch
# skeleton is documented here.
1180 def _generate_url(base, testbed, test_name):
1181 """Generate URL to a trending plot from the name of the test case.
1183 :param base: The base part of URL common to all test cases.
1184 :param testbed: The testbed used for testing.
1185 :param test_name: The name of the test case.
1188 :type test_name: str
1189 :returns: The URL to the plot with the trending data for the given test
# Classify the test into a trending page (`file_name`) and a feature
# suffix (`feature`) purely by substring matching on the test name.
# Order matters: earlier branches win (e.g. "ipsecbasetnlsw" is matched
# before the generic "ipsec" branch).
1199 if "lbdpdk" in test_name or "lbvpp" in test_name:
1200 file_name = "link_bonding"
1202 elif "114b" in test_name and "vhost" in test_name:
1205 elif "testpmd" in test_name or "l3fwd" in test_name:
1208 elif "memif" in test_name:
1209 file_name = "container_memif"
1212 elif "srv6" in test_name:
1215 elif "vhost" in test_name:
1216 if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
1217 file_name = "vm_vhost_l2"
1218 if "114b" in test_name:
1220 elif "l2xcbase" in test_name and "x520" in test_name:
1221 feature = "-base-l2xc"
1222 elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
1223 feature = "-base-l2bd"
1226 elif "ip4base" in test_name:
1227 file_name = "vm_vhost_ip4"
1230 elif "ipsecbasetnlsw" in test_name:
1231 file_name = "ipsecsw"
1232 feature = "-base-scale"
# Generic ipsec tests: page depends on hw/sw offload, feature suffix on
# interface ("-int-") vs. tunnel ("tnl") mode.
1234 elif "ipsec" in test_name:
1236 feature = "-base-scale"
1237 if "hw-" in test_name:
1238 file_name = "ipsechw"
1239 elif "sw-" in test_name:
1240 file_name = "ipsecsw"
1241 if "-int-" in test_name:
1242 feature = "-base-scale-int"
1243 elif "tnl" in test_name:
1244 feature = "-base-scale-tnl"
1246 elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
1247 file_name = "ip4_tunnels"
1250 elif "ip4base" in test_name or "ip4scale" in test_name:
1252 if "xl710" in test_name:
1253 feature = "-base-scale-features"
1254 elif "iacl" in test_name:
1255 feature = "-features-iacl"
1256 elif "oacl" in test_name:
1257 feature = "-features-oacl"
1258 elif "snat" in test_name or "cop" in test_name:
1259 feature = "-features"
1261 feature = "-base-scale"
1263 elif "ip6base" in test_name or "ip6scale" in test_name:
1265 feature = "-base-scale"
1267 elif "l2xcbase" in test_name or "l2xcscale" in test_name \
1268 or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
1269 or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
1271 if "macip" in test_name:
1272 feature = "-features-macip"
1273 elif "iacl" in test_name:
1274 feature = "-features-iacl"
1275 elif "oacl" in test_name:
1276 feature = "-features-oacl"
1278 feature = "-base-scale"
# NIC model selection — the `nic = ...` assignments sit on the missing
# odd-numbered lines; presumably each branch sets a NIC token used in
# the final URL (TODO confirm against full source).
1280 if "x520" in test_name:
1282 elif "x710" in test_name:
1284 elif "xl710" in test_name:
1286 elif "xxv710" in test_name:
1288 elif "vic1227" in test_name:
1290 elif "vic1385" in test_name:
1292 elif "x553" in test_name:
# Frame-size selection; the chosen token is appended to the anchor with
# a trailing '-'.
1298 if "64b" in test_name:
1300 elif "78b" in test_name:
1302 elif "imix" in test_name:
1304 elif "9000b" in test_name:
1306 elif "1518b" in test_name:
1308 elif "114b" in test_name:
1312 anchor += framesize + '-'
# Thread/core combination selection (assignments on missing lines).
1314 if "1t1c" in test_name:
1316 elif "2t2c" in test_name:
1318 elif "4t4c" in test_name:
1320 elif "2t1c" in test_name:
1322 elif "4t2c" in test_name:
1324 elif "8t4c" in test_name:
# Final URL: the "-int"/"-tnl" qualifier is stripped from the page part
# of the feature but the full feature string is kept after the anchor.
1327 return url + file_name + '-' + testbed + '-' + nic + framesize + \
1328 feature.replace("-int", "").replace("-tnl", "") + anchor + feature
# NOTE(review): several lines are missing from this excerpt (the early
# `return`s, the regression/progression conditions on the row counters,
# and the cell/anchor text assignments); only visible logic is described.
1331 def table_performance_trending_dashboard_html(table, input_data):
1332 """Generate the table(s) with algorithm:
1333 table_performance_trending_dashboard_html specified in the specification
1336 :param table: Table to generate.
1337 :param input_data: Data to process.
1339 :type input_data: InputData
# The testbed name is required to build links to the trending plots.
1342 testbed = table.get("testbed", None)
1344 logging.error("The testbed is not defined for the table '{0}'.".
1345 format(table.get("title", "")))
1348 logging.info("  Generating the table {0} ...".
1349 format(table.get("title", "")))
# Load the CSV produced by table_performance_trending_dashboard.
# NOTE(review): 'rb' with csv.reader is the Python 2 convention.
1352 with open(table["input-file"], 'rb') as csv_file:
1353 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1354 csv_lst = [item for item in csv_content]
1356 logging.warning("The input file is not defined.")
1358 except csv.Error as err:
1359 logging.warning("Not possible to process the file '{0}'.\n{1}".
1360 format(table["input-file"], err))
# Build the HTML dashboard with ElementTree; first CSV row becomes the
# header row, first column left-aligned, the rest centred.
1364 dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
1367 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
1368 for idx, item in enumerate(csv_lst[0]):
1369 alignment = "left" if idx == 0 else "center"
1370 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Row colouring: each classification has a (even, odd) background pair
# so rows alternate shade; the conditions choosing "regression" /
# "progression" sit on missing lines (presumably based on the row's
# regression/progression counters — TODO confirm).
1374 colors = {"regression": ("#ffcccc", "#ff9999"),
1375 "progression": ("#c6ecc6", "#9fdf9f"),
1376 "normal": ("#e9f1fb", "#d4e4f7")}
1377 for r_idx, row in enumerate(csv_lst[1:]):
1379 color = "regression"
1381 color = "progression"
1384 background = colors[color][r_idx % 2]
1385 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
# Cells: the first column is wrapped in an <a> linking to the trending
# plot for that test, built from the test name.
1388 for c_idx, item in enumerate(row):
1389 alignment = "left" if c_idx == 0 else "center"
1390 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
1393 url = _generate_url("../trending/", testbed, item)
1394 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Serialise as an RST ".. raw:: html" block.
1399 with open(table["output-file"], 'w') as html_file:
1400 logging.info("    Writing file: '{0}'".format(table["output-file"]))
1401 html_file.write(".. raw:: html\n\n\t")
1402 html_file.write(ET.tostring(dashboard))
1403 html_file.write("\n\t<p><br><br></p>\n")
1405 logging.warning("The output file is not defined.")
# NOTE(review): lines are missing from this excerpt (the `tbl_list`
# initialisation, the pass/fail counter updates, `continue`s and the
# enclosing try); only the visible logic is described.
1409 def table_last_failed_tests(table, input_data):
1410 """Generate the table(s) with algorithm: table_last_failed_tests
1411 specified in the specification file.
1413 :param table: Table to generate.
1414 :param input_data: Data to process.
1415 :type table: pandas.Series
1416 :type input_data: InputData
1419 logging.info("  Generating the table {0} ...".
1420 format(table.get("title", "")))
1422 # Transform the data
1423 logging.info("    Creating the data set for the {0} '{1}'.".
1424 format(table.get("type", ""), table.get("title", "")))
1425 data = input_data.filter_data(table, continue_on_error=True)
# Nothing to report when filtering produced no data.
1427 if data is None or data.empty:
1428 logging.warn("  No data for the {0} '{1}'.".
1429 format(table.get("type", ""), table.get("title", "")))
# Per build: record build id and version, then the pass/fail totals and
# the NIC-prefixed names of the failed tests (NIC extracted from the
# parent suite via REGEX_NIC).
1433 for job, builds in table["data"].items():
1434 for build in builds:
1437 version = input_data.metadata(job, build).get("version", "")
1439 logging.error("Data for {job}: {build} is not present.".
1440 format(job=job, build=build))
1442 tbl_list.append(build)
1443 tbl_list.append(version)
1444 failed_tests = list()
1447 for tst_name, tst_data in data[job][build].iteritems():
1448 if tst_data["status"] != "FAIL":
1452 groups = re.search(REGEX_NIC, tst_data["parent"])
1455 nic = groups.group(0)
1456 failed_tests.append("{0}-{1}".format(nic, tst_data["name"]))
# `passed`/`failed` are accumulated on lines missing from this excerpt —
# presumably counted from tst_data["status"]; TODO confirm.
1457 tbl_list.append(str(passed))
1458 tbl_list.append(str(failed))
1459 tbl_list.extend(failed_tests)
# Output is a flat text file, one item per line.
1461 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1462 logging.info("    Writing file: '{0}'".format(file_name))
1463 with open(file_name, "w") as file_handler:
1464 for test in tbl_list:
1465 file_handler.write(test + '\n')
# NOTE(review): lines are missing from this excerpt (the `now`,
# `tbl_dict`/`tbl_lst` initialisations, `continue`s, the recorded tuple's
# leading fields and the fail-counter update); only visible logic is
# described.
1468 def table_failed_tests(table, input_data):
1469 """Generate the table(s) with algorithm: table_failed_tests
1470 specified in the specification file.
1472 :param table: Table to generate.
1473 :param input_data: Data to process.
1474 :type table: pandas.Series
1475 :type input_data: InputData
1478 logging.info("  Generating the table {0} ...".
1479 format(table.get("title", "")))
1481 # Transform the data
1482 logging.info("    Creating the data set for the {0} '{1}'.".
1483 format(table.get("type", ""), table.get("title", "")))
1484 data = input_data.filter_data(table, continue_on_error=True)
# CSV header; a "Failures" count column presumably sits on the missing
# line 1488 — TODO confirm.
1486 # Prepare the header of the tables
1487 header = ["Test Case",
1489 "Last Failure [Time]",
1490 "Last Failure [VPP-Build-Id]",
1491 "Last Failure [CSIT-Job-Build-Id]"]
# Only builds generated within the last `window` days (default 7) are
# considered.
1493 # Generate the data for the table according to the model in the table
1497 timeperiod = timedelta(int(table.get("window", 7)))
1500 for job, builds in table["data"].items():
1501 for build in builds:
1503 for tst_name, tst_data in data[job][build].iteritems():
1504 if tst_name.lower() in table.get("ignore-list", list()):
1506 if tbl_dict.get(tst_name, None) is None:
1507 groups = re.search(REGEX_NIC, tst_data["parent"])
1510 nic = groups.group(0)
1511 tbl_dict[tst_name] = {
1512 "name": "{0}-{1}".format(nic, tst_data["name"]),
1513 "data": OrderedDict()}
# Keep a per-build record only when the build's "generated" timestamp
# falls inside the window; the stored tuple's later fields include the
# VPP version (earlier fields are on missing lines — status and
# timestamp, presumably).
1515 generated = input_data.metadata(job, build).\
1516 get("generated", "")
1519 then = dt.strptime(generated, "%Y%m%d %H:%M")
1520 if (now - then) <= timeperiod:
1521 tbl_dict[tst_name]["data"][build] = (
1524 input_data.metadata(job, build).get("version", ""),
1526 except (TypeError, KeyError) as err:
1527 logging.warning("tst_name: {} - err: {}".
1528 format(tst_name, repr(err)))
# Per test: count FAIL records and remember the most recent failure's
# date, VPP build and CSIT build; track the global maximum fail count.
1532 for tst_data in tbl_dict.values():
1534 fails_last_date = ""
1536 fails_last_csit = ""
1537 for val in tst_data["data"].values():
1538 if val[0] == "FAIL":
1540 fails_last_date = val[1]
1541 fails_last_vpp = val[2]
1542 fails_last_csit = val[3]
1544 max_fails = fails_nr if fails_nr > max_fails else max_fails
1545 tbl_lst.append([tst_data["name"],
1549 "mrr-daily-build-{0}".format(fails_last_csit)])
# Sort by column 2 descending, then bucket rows by fail count from the
# maximum down to zero.
1551 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1553 for nrf in range(max_fails, -1, -1):
1554 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1555 tbl_sorted.extend(tbl_fails)
# Emit the CSV file, then a pretty-printed .txt rendering of it.
1556 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1558 logging.info("    Writing file: '{0}'".format(file_name))
1559 with open(file_name, "w") as file_handler:
1560 file_handler.write(",".join(header) + "\n")
1561 for test in tbl_sorted:
1562 file_handler.write(",".join([str(item) for item in test]) + '\n')
1564 txt_file_name = "{0}.txt".format(table["output-file"])
1565 logging.info("    Writing file: '{0}'".format(txt_file_name))
1566 convert_csv_to_pretty_txt(file_name, txt_file_name)
# NOTE(review): lines are missing from this excerpt (early `return`s and
# the cell/anchor text assignments), and the function may continue past
# the last visible line; only the visible logic is described. Structure
# mirrors table_performance_trending_dashboard_html, minus the
# regression/progression colouring.
1569 def table_failed_tests_html(table, input_data):
1570 """Generate the table(s) with algorithm: table_failed_tests_html
1571 specified in the specification file.
1573 :param table: Table to generate.
1574 :param input_data: Data to process.
1575 :type table: pandas.Series
1576 :type input_data: InputData
# The testbed name is required to build links to the trending plots.
1579 testbed = table.get("testbed", None)
1581 logging.error("The testbed is not defined for the table '{0}'.".
1582 format(table.get("title", "")))
1585 logging.info("  Generating the table {0} ...".
1586 format(table.get("title", "")))
# Load the CSV produced by table_failed_tests.
# NOTE(review): 'rb' with csv.reader is the Python 2 convention.
1589 with open(table["input-file"], 'rb') as csv_file:
1590 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1591 csv_lst = [item for item in csv_content]
1593 logging.warning("The input file is not defined.")
1595 except csv.Error as err:
1596 logging.warning("Not possible to process the file '{0}'.\n{1}".
1597 format(table["input-file"], err))
# Build the HTML table; the first CSV row is the header, first column
# left-aligned, the rest centred.
1601 failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
1604 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
1605 for idx, item in enumerate(csv_lst[0]):
1606 alignment = "left" if idx == 0 else "center"
1607 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Data rows alternate between two background shades only.
1611 colors = ("#e9f1fb", "#d4e4f7")
1612 for r_idx, row in enumerate(csv_lst[1:]):
1613 background = colors[r_idx % 2]
1614 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
# First-column cells are wrapped in an <a> linking to the trending plot.
1617 for c_idx, item in enumerate(row):
1618 alignment = "left" if c_idx == 0 else "center"
1619 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
1622 url = _generate_url("../trending/", testbed, item)
1623 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Serialise as an RST ".. raw:: html" block.
1628 with open(table["output-file"], 'w') as html_file:
1629 logging.info("    Writing file: '{0}'".format(table["output-file"]))
1630 html_file.write(".. raw:: html\n\n\t")
1631 html_file.write(ET.tostring(failed_tests))
1632 html_file.write("\n\t<p><br><br></p>\n")
1634 logging.warning("The output file is not defined.")