1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30 convert_csv_to_pretty_txt, relative_change_stdev
# Regex extracting a NIC-model substring (e.g. "10ge2p1x520") from a test's
# "parent" suite name; used by re.search/re.sub below to build/strip the NIC
# part of table row names.
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
# NOTE(review): this excerpt is gappy — the embedded original line numbers
# skip (41->45, 46->48), so the docstring closer, a ":type data:" line and
# the "try:" that must precede the eval() call (the "except" at orig. 49
# implies it) are not visible here.
36 def generate_tables(spec, data):
37     """Generate all tables specified in the specification file.
39     :param spec: Specification read from the specification file.
40     :param data: Data to process.
41     :type spec: Specification
45     logging.info("Generating the tables ...")
46     for table in spec.tables:
# Dispatch by name: the spec's "algorithm" string is eval()-ed to one of the
# table_* functions in this module and invoked with (table, data).
# SECURITY NOTE(review): eval() on a spec-provided string is only safe if the
# specification file is fully trusted input.
48         eval(table["algorithm"])(table, data)
49         except NameError as err:
# A misspelled/unknown algorithm name is logged and skipped rather than
# aborting the whole generation run.
50             logging.error("Probably algorithm '{alg}' is not defined: {err}".
51                           format(alg=table["algorithm"], err=repr(err)))
# NOTE(review): gappy excerpt — initializations such as "header = list()",
# "table_lst = list()", "row_lst = list()", the try/except around column
# extraction and the docstring closer fall in the unseen original lines.
55 def table_details(table, input_data):
56     """Generate the table(s) with algorithm: table_detailed_test_results
57     specified in the specification file.
59     :param table: Table to generate.
60     :param input_data: Data to process.
61     :type table: pandas.Series
62     :type input_data: InputData
65     logging.info("  Generating the table {0} ...".
66                  format(table.get("title", "")))
69     logging.info("    Creating the data set for the {0} '{1}'.".
70                  format(table.get("type", ""), table.get("title", "")))
71     data = input_data.filter_data(table)
73     # Prepare the header of the tables
# CSV-quote each column title (double any embedded double quotes).
75     for column in table["columns"]:
76         header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
78     # Generate the data for the table according to the model in the table
# Only the first job and its first build are used for a "details" table
# (Python 2: dict.keys() returns a list, hence the [0] indexing).
80     job = table["data"].keys()[0]
81     build = str(table["data"][job][0])
83     suites = input_data.suites(job, build)
85         logging.error("  No data available. The table will not be generated.")
88     for suite_longname, suite in suites.iteritems():
90         suite_name = suite["name"]
92         for test in data[job][build].keys():
93             if data[job][build][test]["parent"] in suite_name:
# Each column spec's "data" field looks like "<source> <key>"; the key after
# the space selects the per-test attribute to render.
95                 for column in table["columns"]:
97                         col_data = str(data[job][build][test][column["data"].
98                                        split(" ")[1]]).replace('"', '""')
# "conf-history"-like columns are rendered as preformatted rST blocks
# (|prein| ... |preout|); uses Python 2 string.replace (deprecated).
99                         if column["data"].split(" ")[1] in ("conf-history",
101                             col_data = replace(col_data, " |br| ", "",
103                             col_data = " |prein| {0} |preout| ".\
104                                 format(col_data[:-5])
105                         row_lst.append('"{0}"'.format(col_data))
107                         row_lst.append("No data")
108                 table_lst.append(row_lst)
110         # Write the data to file
# One CSV file per suite: <output-file>_<suite><ext>.
112             file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
113                                             table["output-file-ext"])
114             logging.info("      Writing file: '{}'".format(file_name))
115             with open(file_name, "w") as file_handler:
116                 file_handler.write(",".join(header) + "\n")
117                 for item in table_lst:
118                     file_handler.write(",".join(item) + "\n")
120     logging.info("  Done.")
# Same shape as table_details above, but operates on data merged across
# jobs/builds (input_data.merge_data) instead of a single job+build.
# NOTE(review): gappy excerpt — list initializations, try/except and the
# docstring closer are in unseen original lines.
123 def table_merged_details(table, input_data):
124     """Generate the table(s) with algorithm: table_merged_details
125     specified in the specification file.
127     :param table: Table to generate.
128     :param input_data: Data to process.
129     :type table: pandas.Series
130     :type input_data: InputData
133     logging.info("  Generating the table {0} ...".
134                  format(table.get("title", "")))
137     logging.info("    Creating the data set for the {0} '{1}'.".
138                  format(table.get("type", ""), table.get("title", "")))
139     data = input_data.filter_data(table)
140     data = input_data.merge_data(data)
141     data.sort_index(inplace=True)
143     logging.info("    Creating the data set for the {0} '{1}'.".
144                  format(table.get("type", ""), table.get("title", "")))
145     suites = input_data.filter_data(table, data_set="suites")
146     suites = input_data.merge_data(suites)
148     # Prepare the header of the tables
# CSV-quote each column title (double any embedded double quotes).
150     for column in table["columns"]:
151         header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
153     for _, suite in suites.iteritems():
155         suite_name = suite["name"]
157         for test in data.keys():
158             if data[test]["parent"] in suite_name:
160                 for column in table["columns"]:
162                         col_data = str(data[test][column["data"].
163                                        split(" ")[1]]).replace('"', '""')
# Normalize "No Data" placeholders; Python 2 string.replace (deprecated).
164                         col_data = replace(col_data, "No Data",
166                         if column["data"].split(" ")[1] in ("conf-history",
168                             col_data = replace(col_data, " |br| ", "",
170                             col_data = " |prein| {0} |preout| ".\
171                                 format(col_data[:-5])
172                         row_lst.append('"{0}"'.format(col_data))
# Unlike table_details ("No data"), missing cells read "Not captured" here.
174                         row_lst.append('"Not captured"')
175                 table_lst.append(row_lst)
177         # Write the data to file
# One CSV file per suite: <output-file>_<suite><ext>.
179             file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
180                                             table["output-file-ext"])
181             logging.info("      Writing file: '{}'".format(file_name))
182             with open(file_name, "w") as file_handler:
183                 file_handler.write(",".join(header) + "\n")
184                 for item in table_lst:
185                     file_handler.write(",".join(item) + "\n")
187     logging.info("  Done.")
# Builds a reference-vs-compare throughput table (optionally with extra
# "history" columns), computes the relative change per test, sorts by it and
# writes CSV + pretty-txt output plus a footnote about the CSIT-1908 dot1q
# methodology change.
# NOTE(review): gappy excerpt — "tbl_dict = OrderedDict()" and several try:/
# else:/continue lines fall in the unseen original lines; the code below is
# reproduced as-is from the excerpt.
190 def table_performance_comparison(table, input_data):
191     """Generate the table(s) with algorithm: table_performance_comparison
192     specified in the specification file.
194     :param table: Table to generate.
195     :param input_data: Data to process.
196     :type table: pandas.Series
197     :type input_data: InputData
200     logging.info("  Generating the table {0} ...".
201                  format(table.get("title", "")))
204     logging.info("    Creating the data set for the {0} '{1}'.".
205                  format(table.get("type", ""), table.get("title", "")))
206     data = input_data.filter_data(table, continue_on_error=True)
208     # Prepare the header of the tables
210         header = ["Test case", ]
# Column label depends on the test type selected by "include-tests".
212         if table["include-tests"] == "MRR":
213             hdr_param = "Rec Rate"
# Optional extra column pairs (value + stdev) per history item.
217         history = table.get("history", None)
221                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
222                      "{0} Stdev [Mpps]".format(item["title"])])
224             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
225              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
226              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
227              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
229         header_str = ",".join(header) + "\n"
230     except (AttributeError, KeyError) as err:
231         logging.error("The model is invalid, missing parameter: {0}".
235     # Prepare data to the table:
# Pass 1: collect reference data, normalizing test names (strip test-type
# suffixes, map thread counts like "1t1c" -> "1c") so runs can be matched.
237     for job, builds in table["reference"]["data"].items():
# Used later for the dot1q footnote; only "2n-skx" jobs are flagged.
238         topo = "2n-skx" if "2n-skx" in job else ""
240             for tst_name, tst_data in data[job][str(build)].iteritems():
241                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
242                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
243                     replace("-ndrdisc", "").replace("-pdr", "").\
244                     replace("-ndr", "").\
245                     replace("1t1c", "1c").replace("2t1c", "1c").\
246                     replace("2t2c", "2c").replace("4t2c", "2c").\
247                     replace("4t4c", "4c").replace("8t4c", "4c")
248                 if "across topologies" in table["title"].lower():
249                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
250                 if tbl_dict.get(tst_name_mod, None) is None:
# Row display name is "<nic>-<test-name-without-last-token>".
251                     groups = re.search(REGEX_NIC, tst_data["parent"])
252                     nic = groups.group(0) if groups else ""
253                     name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
255                     if "across testbeds" in table["title"].lower() or \
256                             "across topologies" in table["title"].lower():
258                             replace("1t1c", "1c").replace("2t1c", "1c").\
259                             replace("2t2c", "2c").replace("4t2c", "2c").\
260                             replace("4t4c", "4c").replace("8t4c", "4c")
261                     tbl_dict[tst_name_mod] = {"name": name,
265                     # TODO: Re-work when NDRPDRDISC tests are not used
# Pick the measured value according to "include-tests"; NDRPDR results
# carry both NDR and PDR lower bounds.
266                     if table["include-tests"] == "MRR":
267                         tbl_dict[tst_name_mod]["ref-data"]. \
268                             append(tst_data["result"]["receive-rate"].avg)
269                     elif table["include-tests"] == "PDR":
270                         if tst_data["type"] == "PDR":
271                             tbl_dict[tst_name_mod]["ref-data"]. \
272                                 append(tst_data["throughput"]["value"])
273                         elif tst_data["type"] == "NDRPDR":
274                             tbl_dict[tst_name_mod]["ref-data"].append(
275                                 tst_data["throughput"]["PDR"]["LOWER"])
276                     elif table["include-tests"] == "NDR":
277                         if tst_data["type"] == "NDR":
278                             tbl_dict[tst_name_mod]["ref-data"]. \
279                                 append(tst_data["throughput"]["value"])
280                         elif tst_data["type"] == "NDRPDR":
281                             tbl_dict[tst_name_mod]["ref-data"].append(
282                                 tst_data["throughput"]["NDR"]["LOWER"])
286                     pass  # No data in output.xml for this test
# Pass 2: collect compare data with identical name normalization.
288     for job, builds in table["compare"]["data"].items():
290             for tst_name, tst_data in data[job][str(build)].iteritems():
291                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
292                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
293                     replace("-ndrdisc", "").replace("-pdr", ""). \
294                     replace("-ndr", "").\
295                     replace("1t1c", "1c").replace("2t1c", "1c").\
296                     replace("2t2c", "2c").replace("4t2c", "2c").\
297                     replace("4t4c", "4c").replace("8t4c", "4c")
298                 if "across topologies" in table["title"].lower():
299                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
300                 if tbl_dict.get(tst_name_mod, None) is None:
301                     groups = re.search(REGEX_NIC, tst_data["parent"])
302                     nic = groups.group(0) if groups else ""
303                     name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
305                     if "across testbeds" in table["title"].lower() or \
306                             "across topologies" in table["title"].lower():
308                             replace("1t1c", "1c").replace("2t1c", "1c").\
309                             replace("2t2c", "2c").replace("4t2c", "2c").\
310                             replace("4t4c", "4c").replace("8t4c", "4c")
311                     tbl_dict[tst_name_mod] = {"name": name,
315                     # TODO: Re-work when NDRPDRDISC tests are not used
316                     if table["include-tests"] == "MRR":
317                         tbl_dict[tst_name_mod]["cmp-data"]. \
318                             append(tst_data["result"]["receive-rate"].avg)
319                     elif table["include-tests"] == "PDR":
320                         if tst_data["type"] == "PDR":
321                             tbl_dict[tst_name_mod]["cmp-data"]. \
322                                 append(tst_data["throughput"]["value"])
323                         elif tst_data["type"] == "NDRPDR":
324                             tbl_dict[tst_name_mod]["cmp-data"].append(
325                                 tst_data["throughput"]["PDR"]["LOWER"])
326                     elif table["include-tests"] == "NDR":
327                         if tst_data["type"] == "NDR":
328                             tbl_dict[tst_name_mod]["cmp-data"]. \
329                                 append(tst_data["throughput"]["value"])
330                         elif tst_data["type"] == "NDRPDR":
331                             tbl_dict[tst_name_mod]["cmp-data"].append(
332                                 tst_data["throughput"]["NDR"]["LOWER"])
335                 except (KeyError, TypeError):
# Pass 3 (optional): collect per-item history data keyed by item["title"];
# only tests already present in tbl_dict get history appended.
339             for job, builds in item["data"].items():
341                     for tst_name, tst_data in data[job][str(build)].iteritems():
342                         tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
343                             replace("-ndrpdr", "").replace("-pdrdisc", ""). \
344                             replace("-ndrdisc", "").replace("-pdr", ""). \
345                             replace("-ndr", "").\
346                             replace("1t1c", "1c").replace("2t1c", "1c").\
347                             replace("2t2c", "2c").replace("4t2c", "2c").\
348                             replace("4t4c", "4c").replace("8t4c", "4c")
349                         if "across topologies" in table["title"].lower():
350                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
351                         if tbl_dict.get(tst_name_mod, None) is None:
353                             if tbl_dict[tst_name_mod].get("history", None) is None:
354                                 tbl_dict[tst_name_mod]["history"] = OrderedDict()
355                             if tbl_dict[tst_name_mod]["history"].get(item["title"],
357                                 tbl_dict[tst_name_mod]["history"][item["title"]] = \
360                                 # TODO: Re-work when NDRPDRDISC tests are not used
361                                 if table["include-tests"] == "MRR":
362                                     tbl_dict[tst_name_mod]["history"][item["title"
363                                         ]].append(tst_data["result"]["receive-rate"].
365                                 elif table["include-tests"] == "PDR":
366                                     if tst_data["type"] == "PDR":
367                                         tbl_dict[tst_name_mod]["history"][
369                                             append(tst_data["throughput"]["value"])
370                                     elif tst_data["type"] == "NDRPDR":
371                                         tbl_dict[tst_name_mod]["history"][item[
372                                             "title"]].append(tst_data["throughput"][
374                                 elif table["include-tests"] == "NDR":
375                                     if tst_data["type"] == "NDR":
376                                         tbl_dict[tst_name_mod]["history"][
378                                             append(tst_data["throughput"]["value"])
379                                     elif tst_data["type"] == "NDRPDR":
380                                         tbl_dict[tst_name_mod]["history"][item[
381                                             "title"]].append(tst_data["throughput"][
385                         except (TypeError, KeyError):
# Assemble rows: mean/stdev scaled to Mpps (value / 1e6, rounded to 2 dp).
390     for tst_name in tbl_dict.keys():
391         item = [tbl_dict[tst_name]["name"], ]
393         if tbl_dict[tst_name].get("history", None) is not None:
394             for hist_data in tbl_dict[tst_name]["history"].values():
396                     item.append(round(mean(hist_data) / 1000000, 2))
397                     item.append(round(stdev(hist_data) / 1000000, 2))
399                     item.extend(["Not tested", "Not tested"])
401             item.extend(["Not tested", "Not tested"])
402         data_t = tbl_dict[tst_name]["ref-data"]
404             item.append(round(mean(data_t) / 1000000, 2))
405             item.append(round(stdev(data_t) / 1000000, 2))
407             item.extend(["Not tested", "Not tested"])
408         data_t = tbl_dict[tst_name]["cmp-data"]
410             item.append(round(mean(data_t) / 1000000, 2))
411             item.append(round(stdev(data_t) / 1000000, 2))
413             item.extend(["Not tested", "Not tested"])
# item[-4] is the reference mean, item[-2] the compare mean; the appended
# value is either the relative change [%] or an explanatory string.
414         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
415             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
416         elif item[-4] is None or item[-2] is None or item[-4] == 0:
417             item.append("New in CSIT-1908")
# NOTE(review): "topo" here holds the value from the LAST iteration of the
# reference-data job loop above — confirm that is intended.
418         elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
419             item.append("See footnote [1]")
421         if (len(item) == len(header)) and (item[-3] is not None):
424     # Sort the table according to the relative change
425     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
427     # Generate csv tables:
428     csv_file = "{0}.csv".format(table["output-file"])
429     with open(csv_file, "w") as file_handler:
430         file_handler.write(header_str)
432             file_handler.write(",".join([str(item) for item in test]) + "\n")
434     txt_file_name = "{0}.txt".format(table["output-file"])
435     convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Append the methodology-change footnote referenced by "See footnote [1]".
438         with open(txt_file_name, 'a') as txt_file:
439             txt_file.writelines([
441                 "[1] CSIT-1908 changed test methodology of dot1q tests in "
442                 "2n-skx testbeds, dot1q encapsulation is now used on both "
444                 "  Previously dot1q was used only on a single link with the "
445                 "other link carrying untagged Ethernet frames. This change "
447                 "  in slightly lower throughput in CSIT-1908 for these "
448                 "tests. See release notes."
# NIC-filtered variant of table_performance_comparison: skips tests whose
# tags do not contain the configured NIC, and strips the NIC substring from
# normalized test names (re.sub(REGEX_NIC, ...)) so rows match across NICs.
# NOTE(review): near-duplicate of the function above — a candidate for
# refactoring into a shared helper once the full file is in view. Gappy
# excerpt: "tbl_dict = OrderedDict()", try:/continue lines etc. are unseen.
452 def table_performance_comparison_nic(table, input_data):
453     """Generate the table(s) with algorithm: table_performance_comparison
454     specified in the specification file.
456     :param table: Table to generate.
457     :param input_data: Data to process.
458     :type table: pandas.Series
459     :type input_data: InputData
462     logging.info("  Generating the table {0} ...".
463                  format(table.get("title", "")))
466     logging.info("    Creating the data set for the {0} '{1}'.".
467                  format(table.get("type", ""), table.get("title", "")))
468     data = input_data.filter_data(table, continue_on_error=True)
470     # Prepare the header of the tables
472         header = ["Test case", ]
474         if table["include-tests"] == "MRR":
475             hdr_param = "Rec Rate"
479         history = table.get("history", None)
483                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
484                      "{0} Stdev [Mpps]".format(item["title"])])
486             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
487              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
488              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
489              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
491         header_str = ",".join(header) + "\n"
492     except (AttributeError, KeyError) as err:
493         logging.error("The model is invalid, missing parameter: {0}".
497     # Prepare data to the table:
# Pass 1: reference data, restricted to tests tagged with the reference NIC.
499     for job, builds in table["reference"]["data"].items():
500         topo = "2n-skx" if "2n-skx" in job else ""
502             for tst_name, tst_data in data[job][str(build)].iteritems():
503                 if table["reference"]["nic"] not in tst_data["tags"]:
505                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
506                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
507                     replace("-ndrdisc", "").replace("-pdr", "").\
508                     replace("-ndr", "").\
509                     replace("1t1c", "1c").replace("2t1c", "1c").\
510                     replace("2t2c", "2c").replace("4t2c", "2c").\
511                     replace("4t4c", "4c").replace("8t4c", "4c")
# Strip the NIC substring so the same test on different NICs shares a key.
512                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
513                 if "across topologies" in table["title"].lower():
514                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
515                 if tbl_dict.get(tst_name_mod, None) is None:
516                     name = "{0}".format("-".join(tst_data["name"].
518                     if "across testbeds" in table["title"].lower() or \
519                             "across topologies" in table["title"].lower():
521                             replace("1t1c", "1c").replace("2t1c", "1c").\
522                             replace("2t2c", "2c").replace("4t2c", "2c").\
523                             replace("4t4c", "4c").replace("8t4c", "4c")
524                     tbl_dict[tst_name_mod] = {"name": name,
528                     # TODO: Re-work when NDRPDRDISC tests are not used
529                     if table["include-tests"] == "MRR":
530                         tbl_dict[tst_name_mod]["ref-data"]. \
531                             append(tst_data["result"]["receive-rate"].avg)
532                     elif table["include-tests"] == "PDR":
533                         if tst_data["type"] == "PDR":
534                             tbl_dict[tst_name_mod]["ref-data"]. \
535                                 append(tst_data["throughput"]["value"])
536                         elif tst_data["type"] == "NDRPDR":
537                             tbl_dict[tst_name_mod]["ref-data"].append(
538                                 tst_data["throughput"]["PDR"]["LOWER"])
539                     elif table["include-tests"] == "NDR":
540                         if tst_data["type"] == "NDR":
541                             tbl_dict[tst_name_mod]["ref-data"]. \
542                                 append(tst_data["throughput"]["value"])
543                         elif tst_data["type"] == "NDRPDR":
544                             tbl_dict[tst_name_mod]["ref-data"].append(
545                                 tst_data["throughput"]["NDR"]["LOWER"])
549                     pass  # No data in output.xml for this test
# Pass 2: compare data, restricted to the compare NIC's tag.
551     for job, builds in table["compare"]["data"].items():
553             for tst_name, tst_data in data[job][str(build)].iteritems():
554                 if table["compare"]["nic"] not in tst_data["tags"]:
556                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
557                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
558                     replace("-ndrdisc", "").replace("-pdr", ""). \
559                     replace("-ndr", "").\
560                     replace("1t1c", "1c").replace("2t1c", "1c").\
561                     replace("2t2c", "2c").replace("4t2c", "2c").\
562                     replace("4t4c", "4c").replace("8t4c", "4c")
563                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
564                 if "across topologies" in table["title"].lower():
565                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
566                 if tbl_dict.get(tst_name_mod, None) is None:
567                     name = "{0}".format("-".join(tst_data["name"].
569                     if "across testbeds" in table["title"].lower() or \
570                             "across topologies" in table["title"].lower():
572                             replace("1t1c", "1c").replace("2t1c", "1c").\
573                             replace("2t2c", "2c").replace("4t2c", "2c").\
574                             replace("4t4c", "4c").replace("8t4c", "4c")
575                     tbl_dict[tst_name_mod] = {"name": name,
579                     # TODO: Re-work when NDRPDRDISC tests are not used
580                     if table["include-tests"] == "MRR":
581                         tbl_dict[tst_name_mod]["cmp-data"]. \
582                             append(tst_data["result"]["receive-rate"].avg)
583                     elif table["include-tests"] == "PDR":
584                         if tst_data["type"] == "PDR":
585                             tbl_dict[tst_name_mod]["cmp-data"]. \
586                                 append(tst_data["throughput"]["value"])
587                         elif tst_data["type"] == "NDRPDR":
588                             tbl_dict[tst_name_mod]["cmp-data"].append(
589                                 tst_data["throughput"]["PDR"]["LOWER"])
590                     elif table["include-tests"] == "NDR":
591                         if tst_data["type"] == "NDR":
592                             tbl_dict[tst_name_mod]["cmp-data"]. \
593                                 append(tst_data["throughput"]["value"])
594                         elif tst_data["type"] == "NDRPDR":
595                             tbl_dict[tst_name_mod]["cmp-data"].append(
596                                 tst_data["throughput"]["NDR"]["LOWER"])
599                 except (KeyError, TypeError):
# Pass 3 (optional): per-item history, filtered by item["nic"] tag.
604             for job, builds in item["data"].items():
606                     for tst_name, tst_data in data[job][str(build)].iteritems():
607                         if item["nic"] not in tst_data["tags"]:
609                         tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
610                             replace("-ndrpdr", "").replace("-pdrdisc", ""). \
611                             replace("-ndrdisc", "").replace("-pdr", ""). \
612                             replace("-ndr", "").\
613                             replace("1t1c", "1c").replace("2t1c", "1c").\
614                             replace("2t2c", "2c").replace("4t2c", "2c").\
615                             replace("4t4c", "4c").replace("8t4c", "4c")
616                         tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
617                         if "across topologies" in table["title"].lower():
618                             tst_name_mod = tst_name_mod.replace("2n1l-", "")
619                         if tbl_dict.get(tst_name_mod, None) is None:
621                             if tbl_dict[tst_name_mod].get("history", None) is None:
622                                 tbl_dict[tst_name_mod]["history"] = OrderedDict()
623                             if tbl_dict[tst_name_mod]["history"].get(item["title"],
625                                 tbl_dict[tst_name_mod]["history"][item["title"]] = \
628                                 # TODO: Re-work when NDRPDRDISC tests are not used
629                                 if table["include-tests"] == "MRR":
630                                     tbl_dict[tst_name_mod]["history"][item["title"
631                                         ]].append(tst_data["result"]["receive-rate"].
633                                 elif table["include-tests"] == "PDR":
634                                     if tst_data["type"] == "PDR":
635                                         tbl_dict[tst_name_mod]["history"][
637                                             append(tst_data["throughput"]["value"])
638                                     elif tst_data["type"] == "NDRPDR":
639                                         tbl_dict[tst_name_mod]["history"][item[
640                                             "title"]].append(tst_data["throughput"][
642                                 elif table["include-tests"] == "NDR":
643                                     if tst_data["type"] == "NDR":
644                                         tbl_dict[tst_name_mod]["history"][
646                                             append(tst_data["throughput"]["value"])
647                                     elif tst_data["type"] == "NDRPDR":
648                                         tbl_dict[tst_name_mod]["history"][item[
649                                             "title"]].append(tst_data["throughput"][
653                         except (TypeError, KeyError):
# Assemble rows: mean/stdev in Mpps; then relative change or note.
658     for tst_name in tbl_dict.keys():
659         item = [tbl_dict[tst_name]["name"], ]
661         if tbl_dict[tst_name].get("history", None) is not None:
662             for hist_data in tbl_dict[tst_name]["history"].values():
664                     item.append(round(mean(hist_data) / 1000000, 2))
665                     item.append(round(stdev(hist_data) / 1000000, 2))
667                     item.extend(["Not tested", "Not tested"])
669                 item.extend(["Not tested", "Not tested"])
670         data_t = tbl_dict[tst_name]["ref-data"]
672             item.append(round(mean(data_t) / 1000000, 2))
673             item.append(round(stdev(data_t) / 1000000, 2))
675             item.extend(["Not tested", "Not tested"])
676         data_t = tbl_dict[tst_name]["cmp-data"]
678             item.append(round(mean(data_t) / 1000000, 2))
679             item.append(round(stdev(data_t) / 1000000, 2))
681             item.extend(["Not tested", "Not tested"])
682         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
683             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
684         elif item[-4] is None or item[-2] is None or item[-4] == 0:
685             item.append("New in CSIT-1908")
# NOTE(review): "topo" carries the last reference-job value — confirm intent.
686         elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
687             item.append("See footnote [1]")
689         if (len(item) == len(header)) and (item[-3] is not None):
692     # Sort the table according to the relative change
693     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
695     # Generate csv tables:
696     csv_file = "{0}.csv".format(table["output-file"])
697     with open(csv_file, "w") as file_handler:
698         file_handler.write(header_str)
700             file_handler.write(",".join([str(item) for item in test]) + "\n")
702     txt_file_name = "{0}.txt".format(table["output-file"])
703     convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Append the methodology-change footnote referenced by "See footnote [1]".
706         with open(txt_file_name, 'a') as txt_file:
707             txt_file.writelines([
709                 "[1] CSIT-1908 changed test methodology of dot1q tests in "
710                 "2n-skx testbeds, dot1q encapsulation is now used on both "
712                 "  Previously dot1q was used only on a single link with the "
713                 "other link carrying untagged Ethernet frames. This change "
715                 "  in slightly lower throughput in CSIT-1908 for these "
716                 "tests. See release notes."
# Compares the same tests across two NICs: one data set is partitioned into
# "ref-data"/"cmp-data" by NIC tag, then mean/stdev (Mpps) and relative
# change are emitted to CSV + pretty txt.
# NOTE(review): gappy excerpt — "tbl_dict = OrderedDict()", try:, "continue"
# after the error log, and tbl_lst.append(...) are in unseen original lines.
720 def table_nics_comparison(table, input_data):
721     """Generate the table(s) with algorithm: table_nics_comparison
722     specified in the specification file.
724     :param table: Table to generate.
725     :param input_data: Data to process.
726     :type table: pandas.Series
727     :type input_data: InputData
730     logging.info("  Generating the table {0} ...".
731                  format(table.get("title", "")))
734     logging.info("    Creating the data set for the {0} '{1}'.".
735                  format(table.get("type", ""), table.get("title", "")))
736     data = input_data.filter_data(table, continue_on_error=True)
738     # Prepare the header of the tables
740         header = ["Test case", ]
742         if table["include-tests"] == "MRR":
743             hdr_param = "Receive Rate"
745             hdr_param = "Throughput"
748             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
749              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
750              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
751              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
753         header_str = ",".join(header) + "\n"
754     except (AttributeError, KeyError) as err:
755         logging.error("The model is invalid, missing parameter: {0}".
759     # Prepare data to the table:
761     for job, builds in table["data"].items():
763             for tst_name, tst_data in data[job][str(build)].iteritems():
# Normalize the test name (drop test-type suffix, unify thread counts,
# strip the NIC substring) so both NICs map to the same dict key.
764                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
765                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
766                     replace("-ndrdisc", "").replace("-pdr", "").\
767                     replace("-ndr", "").\
768                     replace("1t1c", "1c").replace("2t1c", "1c").\
769                     replace("2t2c", "2c").replace("4t2c", "2c").\
770                     replace("4t4c", "4c").replace("8t4c", "4c")
771                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
772                 if tbl_dict.get(tst_name_mod, None) is None:
773                     name = "-".join(tst_data["name"].split("-")[:-1])
774                     tbl_dict[tst_name_mod] = {"name": name,
778                     if table["include-tests"] == "MRR":
779                         result = tst_data["result"]["receive-rate"].avg
780                     elif table["include-tests"] == "PDR":
781                         result = tst_data["throughput"]["PDR"]["LOWER"]
782                     elif table["include-tests"] == "NDR":
783                         result = tst_data["throughput"]["NDR"]["LOWER"]
# Route the measured value to ref or cmp column by the test's NIC tag.
788                     if table["reference"]["nic"] in tst_data["tags"]:
789                         tbl_dict[tst_name_mod]["ref-data"].append(result)
790                     elif table["compare"]["nic"] in tst_data["tags"]:
791                         tbl_dict[tst_name_mod]["cmp-data"].append(result)
792                 except (TypeError, KeyError) as err:
793                     logging.debug("No data for {0}".format(tst_name))
794                     logging.debug(repr(err))
795                     # No data in output.xml for this test
798     for tst_name in tbl_dict.keys():
799         item = [tbl_dict[tst_name]["name"], ]
800         data_t = tbl_dict[tst_name]["ref-data"]
802             item.append(round(mean(data_t) / 1000000, 2))
803             item.append(round(stdev(data_t) / 1000000, 2))
805             item.extend([None, None])
806         data_t = tbl_dict[tst_name]["cmp-data"]
808             item.append(round(mean(data_t) / 1000000, 2))
809             item.append(round(stdev(data_t) / 1000000, 2))
811             item.extend([None, None])
# item[-4]=ref mean, item[-2]=cmp mean; rows missing either side (len
# mismatch vs header) are dropped by the length check below.
812         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
813             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
814         if len(item) == len(header):
817     # Sort the table according to the relative change
818     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
820     # Generate csv tables:
821     csv_file = "{0}.csv".format(table["output-file"])
822     with open(csv_file, "w") as file_handler:
823         file_handler.write(header_str)
825             file_handler.write(",".join([str(item) for item in test]) + "\n")
827     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
# Compares SOAK test throughput ("compare") against the corresponding NDR/PDR
# or MRR result ("reference") for the same test, using
# relative_change_stdev() to also report the stdev of the delta.
# NOTE(review): gappy excerpt — "tbl_dict = OrderedDict()", try: lines and
# tbl_lst.append(...) are in unseen original lines.
830 def table_soak_vs_ndr(table, input_data):
831     """Generate the table(s) with algorithm: table_soak_vs_ndr
832     specified in the specification file.
834     :param table: Table to generate.
835     :param input_data: Data to process.
836     :type table: pandas.Series
837     :type input_data: InputData
840     logging.info("  Generating the table {0} ...".
841                  format(table.get("title", "")))
844     logging.info("    Creating the data set for the {0} '{1}'.".
845                  format(table.get("type", ""), table.get("title", "")))
846     data = input_data.filter_data(table, continue_on_error=True)
848     # Prepare the header of the table
852             "{0} Throughput [Mpps]".format(table["reference"]["title"]),
853             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
854             "{0} Throughput [Mpps]".format(table["compare"]["title"]),
855             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
856             "Delta [%]", "Stdev of delta [%]"]
857         header_str = ",".join(header) + "\n"
858     except (AttributeError, KeyError) as err:
859         logging.error("The model is invalid, missing parameter: {0}".
863     # Create a list of available SOAK test results:
865     for job, builds in table["compare"]["data"].items():
867             for tst_name, tst_data in data[job][str(build)].iteritems():
868                 if tst_data["type"] == "SOAK":
# The "-soak" suffix is stripped so the key matches the NDR test name.
869                     tst_name_mod = tst_name.replace("-soak", "")
870                     if tbl_dict.get(tst_name_mod, None) is None:
871                         groups = re.search(REGEX_NIC, tst_data["parent"])
872                         nic = groups.group(0) if groups else ""
873                         name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
875                         tbl_dict[tst_name_mod] = {
881                         tbl_dict[tst_name_mod]["cmp-data"].append(
882                             tst_data["throughput"]["LOWER"])
883             except (KeyError, TypeError):
885     tests_lst = tbl_dict.keys()
887     # Add corresponding NDR test results:
# Only reference results whose normalized name matches an already-seen SOAK
# test are collected.
888     for job, builds in table["reference"]["data"].items():
890             for tst_name, tst_data in data[job][str(build)].iteritems():
891                 tst_name_mod = tst_name.replace("-ndrpdr", "").\
893                 if tst_name_mod in tests_lst:
895                         if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
896                             if table["include-tests"] == "MRR":
897                                 result = tst_data["result"]["receive-rate"].avg
898                             elif table["include-tests"] == "PDR":
899                                 result = tst_data["throughput"]["PDR"]["LOWER"]
900                             elif table["include-tests"] == "NDR":
901                                 result = tst_data["throughput"]["NDR"]["LOWER"]
904                             if result is not None:
905                                 tbl_dict[tst_name_mod]["ref-data"].append(
907                     except (KeyError, TypeError):
# Assemble rows: ref mean/stdev, cmp mean/stdev (Mpps), then delta and the
# stdev of the delta via relative_change_stdev().
911     for tst_name in tbl_dict.keys():
912         item = [tbl_dict[tst_name]["name"], ]
913         data_r = tbl_dict[tst_name]["ref-data"]
915             data_r_mean = mean(data_r)
916             item.append(round(data_r_mean / 1000000, 2))
917             data_r_stdev = stdev(data_r)
918             item.append(round(data_r_stdev / 1000000, 2))
922             item.extend([None, None])
923         data_c = tbl_dict[tst_name]["cmp-data"]
925             data_c_mean = mean(data_c)
926             item.append(round(data_c_mean / 1000000, 2))
927             data_c_stdev = stdev(data_c)
928             item.append(round(data_c_stdev / 1000000, 2))
932             item.extend([None, None])
# NOTE(review): if the ref branch took the empty path, data_r_mean/stdev
# could be stale/undefined here — the guarding lines are not visible in this
# excerpt; confirm against the full file.
933         if data_r_mean and data_c_mean:
934             delta, d_stdev = relative_change_stdev(
935                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
936             item.append(round(delta, 2))
937             item.append(round(d_stdev, 2))
940     # Sort the table according to the relative change
941     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
943     # Generate csv tables:
944     csv_file = "{0}.csv".format(table["output-file"])
945     with open(csv_file, "w") as file_handler:
946         file_handler.write(header_str)
948             file_handler.write(",".join([str(item) for item in test]) + "\n")
950     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
# Builds the trending dashboard: per-test MRR receive-rate series are
# classified by classify_anomalies(), short/long-term relative changes are
# computed from trend averages, and rows are sorted by regression/progression
# counts before writing CSV + pretty txt.
# NOTE(review): gappy excerpt — "tbl_dict = OrderedDict()", "tbl_lst = ..."
# and several guard/continue lines are in unseen original lines.
953 def table_performance_trending_dashboard(table, input_data):
954     """Generate the table(s) with algorithm:
955     table_performance_trending_dashboard
956     specified in the specification file.
958     :param table: Table to generate.
959     :param input_data: Data to process.
960     :type table: pandas.Series
961     :type input_data: InputData
964     logging.info("  Generating the table {0} ...".
965                  format(table.get("title", "")))
968     logging.info("    Creating the data set for the {0} '{1}'.".
969                  format(table.get("type", ""), table.get("title", "")))
970     data = input_data.filter_data(table, continue_on_error=True)
972     # Prepare the header of the tables
973     header = ["Test Case",
975               "Short-Term Change [%]",
976               "Long-Term Change [%]",
980     header_str = ",".join(header) + "\n"
982     # Prepare data to the table:
984     for job, builds in table["data"].items():
986             for tst_name, tst_data in data[job][str(build)].iteritems():
# Spec-level ignore list filters tests out of the dashboard.
987                 if tst_name.lower() in table.get("ignore-list", list()):
989                 if tbl_dict.get(tst_name, None) is None:
990                     groups = re.search(REGEX_NIC, tst_data["parent"])
993                     nic = groups.group(0)
994                     tbl_dict[tst_name] = {
995                         "name": "{0}-{1}".format(nic, tst_data["name"]),
996                         "data": OrderedDict()}
# Per-build receive-rate sample, keyed by build number (insertion order).
998                     tbl_dict[tst_name]["data"][str(build)] = \
999                         tst_data["result"]["receive-rate"]
1000                 except (TypeError, KeyError):
1001                     pass  # No data in output.xml for this test
1004     for tst_name in tbl_dict.keys():
1005         data_t = tbl_dict[tst_name]["data"]
# classify_anomalies returns per-sample classifications ("regression"/
# "progression"/...) and the trend averages used below.
1009         classification_lst, avgs = classify_anomalies(data_t)
1011         win_size = min(len(data_t), table["window"])
1012         long_win_size = min(len(data_t), table["long-trend-window"])
1016                 [x for x in avgs[-long_win_size:-win_size]
# Short-term change: last trend avg vs the avg one window ago.
1021         avg_week_ago = avgs[max(-win_size, -len(avgs))]
1023         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1024             rel_change_last = nan
1026             rel_change_last = round(
1027                 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
# Long-term change: last trend avg vs the max avg in the long window.
1029         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1030             rel_change_long = nan
1032             rel_change_long = round(
1033                 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1035         if classification_lst:
1036             if isnan(rel_change_last) and isnan(rel_change_long):
1038             if (isnan(last_avg) or
1039                     isnan(rel_change_last) or
1040                     isnan(rel_change_long)):
1043                 [tbl_dict[tst_name]["name"],
1044                  round(last_avg / 1000000, 2),
1047                  classification_lst[-win_size:].count("regression"),
1048                  classification_lst[-win_size:].count("progression")])
1050     tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-key ordering: iterate regression count (col 4) descending,
# then progression count (col 5) descending, sorting each bucket by col 2.
1053     for nrr in range(table["window"], -1, -1):
1054         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1055         for nrp in range(table["window"], -1, -1):
1056             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1057             tbl_out.sort(key=lambda rel: rel[2])
1058             tbl_sorted.extend(tbl_out)
1060     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1062     logging.info("    Writing file: '{0}'".format(file_name))
1063     with open(file_name, "w") as file_handler:
1064         file_handler.write(header_str)
1065         for test in tbl_sorted:
1066             file_handler.write(",".join([str(item) for item in test]) + '\n')
1068     txt_file_name = "{0}.txt".format(table["output-file"])
1069     logging.info("    Writing file: '{0}'".format(txt_file_name))
1070     convert_csv_to_pretty_txt(file_name, txt_file_name)
1073 def _generate_url(base, testbed, test_name):
1074     """Generate URL to a trending plot from the name of the test case.
1076     :param base: The base part of URL common to all test cases.
1077     :param testbed: The testbed used for testing.
1078     :param test_name: The name of the test case.
1081     :type test_name: str
1082     :returns: The URL to the plot with the trending data for the given test
    # Select the trending page (file_name) and feature suffix from
    # substrings of the encoded test name; first matching branch wins.
1092     if "lbdpdk" in test_name or "lbvpp" in test_name:
1093         file_name = "link_bonding"
1095     elif "114b" in test_name and "vhost" in test_name:
1098     elif "testpmd" in test_name or "l3fwd" in test_name:
1101     elif "memif" in test_name:
1102         file_name = "container_memif"
1105     elif "srv6" in test_name:
1108     elif "vhost" in test_name:
1109         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
1110             file_name = "vm_vhost_l2"
1111             if "114b" in test_name:
1113             elif "l2xcbase" in test_name and "x520" in test_name:
1114                 feature = "-base-l2xc"
1115             elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
1116                 feature = "-base-l2bd"
1119         elif "ip4base" in test_name:
1120             file_name = "vm_vhost_ip4"
1123     elif "ipsecbasetnlsw" in test_name:
1124         file_name = "ipsecsw"
1125         feature = "-base-scale"
1127     elif "ipsec" in test_name:
1129         feature = "-base-scale"
1130         if "hw-" in test_name:
1131             file_name = "ipsechw"
1132         elif "sw-" in test_name:
1133             file_name = "ipsecsw"
1134             if "-int-" in test_name:
1135                 feature = "-base-scale-int"
1136             elif "tnl" in test_name:
1137                 feature = "-base-scale-tnl"
1139     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
1140         file_name = "ip4_tunnels"
1143     elif "ip4base" in test_name or "ip4scale" in test_name:
1145         if "xl710" in test_name:
1146             feature = "-base-scale-features"
1147         elif "iacl" in test_name:
1148             feature = "-features-iacl"
1149         elif "oacl" in test_name:
1150             feature = "-features-oacl"
1151         elif "snat" in test_name or "cop" in test_name:
1152             feature = "-features"
1154             feature = "-base-scale"
1156     elif "ip6base" in test_name or "ip6scale" in test_name:
1158         feature = "-base-scale"
1160     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
1161             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
1162             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
1164         if "macip" in test_name:
1165             feature = "-features-macip"
1166         elif "iacl" in test_name:
1167             feature = "-features-iacl"
1168         elif "oacl" in test_name:
1169             feature = "-features-oacl"
1171             feature = "-base-scale"
    # NIC model fragment of the anchor.
1173     if "x520" in test_name:
1175     elif "x710" in test_name:
1177     elif "xl710" in test_name:
1179     elif "xxv710" in test_name:
1181     elif "vic1227" in test_name:
1183     elif "vic1385" in test_name:
1185     elif "x553" in test_name:
    # Frame size fragment of the anchor.
1191     if "64b" in test_name:
1193     elif "78b" in test_name:
1195     elif "imix" in test_name:
1197     elif "9000b" in test_name:
1199     elif "1518b" in test_name:
1201     elif "114b" in test_name:
1205     anchor += framesize + '-'
    # Thread/core combination fragment of the anchor.
1207     if "1t1c" in test_name:
1209     elif "2t2c" in test_name:
1211     elif "4t4c" in test_name:
1213     elif "2t1c" in test_name:
1215     elif "4t2c" in test_name:
1217     elif "8t4c" in test_name:
    # The "-int"/"-tnl" suffix is stripped from the page-name part of the
    # URL but preserved in the trailing in-page anchor (feature).
1220     return url + file_name + '-' + testbed + '-' + nic + framesize + \
1221         feature.replace("-int", "").replace("-tnl", "") + anchor + feature
1224 def table_performance_trending_dashboard_html(table, input_data):
1225     """Generate the table(s) with algorithm:
1226     table_performance_trending_dashboard_html specified in the specification
1229     :param table: Table to generate.
1230     :param input_data: Data to process.
1232     :type input_data: InputData
    # A testbed name is mandatory: it becomes part of the trending URLs.
1235     testbed = table.get("testbed", None)
1237         logging.error("The testbed is not defined for the table '{0}'.".
1238                       format(table.get("title", "")))
1241     logging.info("  Generating the table {0} ...".
1242                  format(table.get("title", "")))
    # Read the dashboard CSV produced earlier into a list of rows.
    # NOTE(review): opened in binary mode -- Python 2 csv idiom.
1245         with open(table["input-file"], 'rb') as csv_file:
1246             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1247             csv_lst = [item for item in csv_content]
1249         logging.warning("The input file is not defined.")
1251     except csv.Error as err:
1252         logging.warning("Not possible to process the file '{0}'.\n{1}".
1253                         format(table["input-file"], err))
    # Build the HTML table as an ElementTree.
1257     dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
    # Header row: first column left-aligned, the rest centered.
1260     tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
1261     for idx, item in enumerate(csv_lst[0]):
1262         alignment = "left" if idx == 0 else "center"
1263         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
    # Data rows: color per anomaly class, alternating light/dark shade.
1267     colors = {"regression": ("#ffcccc", "#ff9999"),
1268               "progression": ("#c6ecc6", "#9fdf9f"),
1269               "normal": ("#e9f1fb", "#d4e4f7")}
1270     for r_idx, row in enumerate(csv_lst[1:]):
1272             color = "regression"
1274             color = "progression"
1277         background = colors[color][r_idx % 2]
1278         tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
1281         for c_idx, item in enumerate(row):
1282             alignment = "left" if c_idx == 0 else "center"
1283             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
    # Test-name cells are turned into links to the trending plots.
1286                 url = _generate_url("../trending/", testbed, item)
1287                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
    # Emit the table as an rST "raw:: html" directive.
1292         with open(table["output-file"], 'w') as html_file:
1293             logging.info("    Writing file: '{0}'".format(table["output-file"]))
1294             html_file.write(".. raw:: html\n\n\t")
1295             html_file.write(ET.tostring(dashboard))
1296             html_file.write("\n\t<p><br><br></p>\n")
1298         logging.warning("The output file is not defined.")
1302 def table_last_failed_tests(table, input_data):
1303     """Generate the table(s) with algorithm: table_last_failed_tests
1304     specified in the specification file.
1306     :param table: Table to generate.
1307     :param input_data: Data to process.
1308     :type table: pandas.Series
1309     :type input_data: InputData
1312     logging.info("  Generating the table {0} ...".
1313                  format(table.get("title", "")))
1315     # Transform the data
1316     logging.info("    Creating the data set for the {0} '{1}'.".
1317                  format(table.get("type", ""), table.get("title", "")))
1318     data = input_data.filter_data(table, continue_on_error=True)
    # Nothing to report when the filtered data set is empty.
1320     if data is None or data.empty:
1321         logging.warn("    No data for the {0} '{1}'.".
1322                      format(table.get("type", ""), table.get("title", "")))
    # For each build: record build id, version, then the names of all
    # tests whose status is FAIL (name prefixed with the parsed NIC).
1326     for job, builds in table["data"].items():
1327         for build in builds:
1330                 version = input_data.metadata(job, build).get("version", "")
1332                 logging.error("Data for {job}: {build} is not present.".
1333                               format(job=job, build=build))
1335             tbl_list.append(build)
1336             tbl_list.append(version)
1337             for tst_name, tst_data in data[job][build].iteritems():
1338                 if tst_data["status"] != "FAIL":
1340                 groups = re.search(REGEX_NIC, tst_data["parent"])
1343                 nic = groups.group(0)
1344                 tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))
    # Plain text output: one item per line.
1346     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1347     logging.info("    Writing file: '{0}'".format(file_name))
1348     with open(file_name, "w") as file_handler:
1349         for test in tbl_list:
1350             file_handler.write(test + '\n')
1353 def table_failed_tests(table, input_data):
1354     """Generate the table(s) with algorithm: table_failed_tests
1355     specified in the specification file.
1357     :param table: Table to generate.
1358     :param input_data: Data to process.
1359     :type table: pandas.Series
1360     :type input_data: InputData
1363     logging.info("  Generating the table {0} ...".
1364                  format(table.get("title", "")))
1366     # Transform the data
1367     logging.info("    Creating the data set for the {0} '{1}'.".
1368                  format(table.get("type", ""), table.get("title", "")))
1369     data = input_data.filter_data(table, continue_on_error=True)
1371     # Prepare the header of the tables
1372     header = ["Test Case",
1374               "Last Failure [Time]",
1375               "Last Failure [VPP-Build-Id]",
1376               "Last Failure [CSIT-Job-Build-Id]"]
1378     # Generate the data for the table according to the model in the table
    # Only builds generated within the configured time window
    # (table["window"], default 7 days) are counted.
1382     timeperiod = timedelta(int(table.get("window", 7)))
1385     for job, builds in table["data"].items():
1386         for build in builds:
1388             for tst_name, tst_data in data[job][build].iteritems():
1389                 if tst_name.lower() in table.get("ignore-list", list()):
1391                 if tbl_dict.get(tst_name, None) is None:
1392                     groups = re.search(REGEX_NIC, tst_data["parent"])
1395                     nic = groups.group(0)
1396                     tbl_dict[tst_name] = {
1397                         "name": "{0}-{1}".format(nic, tst_data["name"]),
1398                         "data": OrderedDict()}
    # Parse the build generation timestamp to apply the time window.
1400                     generated = input_data.metadata(job, build).\
1401                         get("generated", "")
1404                     then = dt.strptime(generated, "%Y%m%d %H:%M")
1405                     if (now - then) <= timeperiod:
1406                         tbl_dict[tst_name]["data"][build] = (
1409                             input_data.metadata(job, build).get("version", ""),
1411                 except (TypeError, KeyError) as err:
1412                     logging.warning("tst_name: {} - err: {}".
1413                                     format(tst_name, repr(err)))
    # Count failures per test and remember the most recent failure's
    # timestamp, VPP build id and CSIT build id.
1417     for tst_data in tbl_dict.values():
1419         for val in tst_data["data"].values():
1420             if val[0] == "FAIL":
1422                 fails_last_date = val[1]
1423                 fails_last_vpp = val[2]
1424                 fails_last_csit = val[3]
1426         max_fails = fails_nr if fails_nr > max_fails else max_fails
1427         tbl_lst.append([tst_data["name"],
1431                         "mrr-daily-build-{0}".format(fails_last_csit)])
    # Sort by column 2 descending, then group by failure count descending.
1433     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1435     for nrf in range(max_fails, -1, -1):
1436         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1437         tbl_sorted.extend(tbl_fails)
    # Write CSV output and a pretty-printed text twin.
1438     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1440     logging.info("    Writing file: '{0}'".format(file_name))
1441     with open(file_name, "w") as file_handler:
1442         file_handler.write(",".join(header) + "\n")
1443         for test in tbl_sorted:
1444             file_handler.write(",".join([str(item) for item in test]) + '\n')
1446     txt_file_name = "{0}.txt".format(table["output-file"])
1447     logging.info("    Writing file: '{0}'".format(txt_file_name))
1448     convert_csv_to_pretty_txt(file_name, txt_file_name)
1451 def table_failed_tests_html(table, input_data):
1452     """Generate the table(s) with algorithm: table_failed_tests_html
1453     specified in the specification file.
1455     :param table: Table to generate.
1456     :param input_data: Data to process.
1457     :type table: pandas.Series
1458     :type input_data: InputData
    # A testbed name is mandatory: it becomes part of the trending URLs.
1461     testbed = table.get("testbed", None)
1463         logging.error("The testbed is not defined for the table '{0}'.".
1464                       format(table.get("title", "")))
1467     logging.info("  Generating the table {0} ...".
1468                  format(table.get("title", "")))
    # Read the failed-tests CSV produced by table_failed_tests.
    # NOTE(review): opened in binary mode -- Python 2 csv idiom.
1471         with open(table["input-file"], 'rb') as csv_file:
1472             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1473             csv_lst = [item for item in csv_content]
1475         logging.warning("The input file is not defined.")
1477     except csv.Error as err:
1478         logging.warning("Not possible to process the file '{0}'.\n{1}".
1479                         format(table["input-file"], err))
    # Build the HTML table as an ElementTree.
1483     failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
    # Header row: first column left-aligned, the rest centered.
1486     tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
1487     for idx, item in enumerate(csv_lst[0]):
1488         alignment = "left" if idx == 0 else "center"
1489         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
    # Data rows with alternating background shades.
1493     colors = ("#e9f1fb", "#d4e4f7")
1494     for r_idx, row in enumerate(csv_lst[1:]):
1495         background = colors[r_idx % 2]
1496         tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
1499         for c_idx, item in enumerate(row):
1500             alignment = "left" if c_idx == 0 else "center"
1501             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
    # Test-name cells are turned into links to the trending plots.
1504                 url = _generate_url("../trending/", testbed, item)
1505                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
    # Emit the table as an rST "raw:: html" directive.
1510         with open(table["output-file"], 'w') as html_file:
1511             logging.info("    Writing file: '{0}'".format(table["output-file"]))
1512             html_file.write(".. raw:: html\n\n\t")
1513             html_file.write(ET.tostring(failed_tests))
1514             html_file.write("\n\t<p><br><br></p>\n")
1516         logging.warning("The output file is not defined.")