1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30 convert_csv_to_pretty_txt, relative_change_stdev
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
# Dispatcher: resolves each table's "algorithm" string to a function in this
# module and invokes it with (table, data).
# NOTE(review): this listing is an excerpt -- the embedded line numbers skip
# values (e.g. 47), so the `try:` matching the `except NameError` below and
# the closing of the docstring are not shown here.
36 def generate_tables(spec, data):
37 """Generate all tables specified in the specification file.
39 :param spec: Specification read from the specification file.
40 :param data: Data to process.
41 :type spec: Specification
45 logging.info("Generating the tables ...")
46 for table in spec.tables:
# NOTE(review): eval() executes whatever name the specification provides;
# this is safe only while specification files are fully trusted input.
48 eval(table["algorithm"])(table, data)
# Only NameError (unknown algorithm name) is handled; other exceptions
# raised by the table generator propagate to the caller.
49 except NameError as err:
50 logging.error("Probably algorithm '{alg}' is not defined: {err}".
51 format(alg=table["algorithm"], err=repr(err)))
# Builds one CSV file per suite with per-test detail columns taken from the
# first job/build listed in table["data"].
# NOTE(review): excerpt with gaps -- initialisations such as `header = list()`
# and `row_lst`/`table_lst` setup, plus several `try:`/`continue` lines, are
# omitted from this listing (embedded line numbers skip values).
55 def table_details(table, input_data):
56 """Generate the table(s) with algorithm: table_detailed_test_results
57 specified in the specification file.
59 :param table: Table to generate.
60 :param input_data: Data to process.
61 :type table: pandas.Series
62 :type input_data: InputData
65 logging.info("  Generating the table {0} ...".
66 format(table.get("title", "")))
69 logging.info("    Creating the data set for the {0} '{1}'.".
70 format(table.get("type", ""), table.get("title", "")))
71 data = input_data.filter_data(table)
73 # Prepare the header of the tables
# Column titles are CSV-quoted; embedded quotes are doubled per RFC 4180.
75 for column in table["columns"]:
76 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
78 # Generate the data for the table according to the model in the table
# Python 2 only: dict.keys() returns a list here; in Python 3 this would
# need list(table["data"].keys())[0]. Only the first job/build is used.
80 job = table["data"].keys()[0]
81 build = str(table["data"][job][0])
83 suites = input_data.suites(job, build)
85 logging.error("  No data available. The table will not be generated.")
# Python 2 only: iteritems(); one output file is produced per suite.
88 for suite_longname, suite in suites.iteritems():
90 suite_name = suite["name"]
92 for test in data[job][build].keys():
93 if data[job][build][test]["parent"] in suite_name:
95 for column in table["columns"]:
# column["data"] looks like "<source> <field>"; the field name after
# the space indexes the test record.
97 col_data = str(data[job][build][test][column["data"].
98 split(" ")[1]]).replace('"', '""')
99 if column["data"].split(" ")[1] in ("conf-history",
# `replace` here is the deprecated Python 2 string.replace()
# function imported at module top, not str.replace.
101 col_data = replace(col_data, " |br| ", "",
103 col_data = " |prein| {0} |preout| ".\
104 format(col_data[:-5])
105 row_lst.append('"{0}"'.format(col_data))
107 row_lst.append("No data")
108 table_lst.append(row_lst)
110 # Write the data to file
112 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
113 table["output-file-ext"])
114 logging.info("      Writing file: '{}'".format(file_name))
115 with open(file_name, "w") as file_handler:
116 file_handler.write(",".join(header) + "\n")
117 for item in table_lst:
118 file_handler.write(",".join(item) + "\n")
120 logging.info("  Done.")
# Like table_details, but first merges data across jobs/builds via
# input_data.merge_data() and iterates merged suites instead of one build.
# NOTE(review): excerpt with gaps -- `header`/`row_lst`/`table_lst`
# initialisations and some continuation lines are not shown in this listing.
123 def table_merged_details(table, input_data):
124 """Generate the table(s) with algorithm: table_merged_details
125 specified in the specification file.
127 :param table: Table to generate.
128 :param input_data: Data to process.
129 :type table: pandas.Series
130 :type input_data: InputData
133 logging.info("  Generating the table {0} ...".
134 format(table.get("title", "")))
137 logging.info("    Creating the data set for the {0} '{1}'.".
138 format(table.get("type", ""), table.get("title", "")))
139 data = input_data.filter_data(table)
140 data = input_data.merge_data(data)
141 data.sort_index(inplace=True)
143 logging.info("    Creating the data set for the {0} '{1}'.".
144 format(table.get("type", ""), table.get("title", "")))
145 suites = input_data.filter_data(table, data_set="suites")
146 suites = input_data.merge_data(suites)
148 # Prepare the header of the tables
# CSV-quote titles; embedded quotes doubled.
150 for column in table["columns"]:
151 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
# Python 2 only: iteritems(); one output file per merged suite.
153 for _, suite in suites.iteritems():
155 suite_name = suite["name"]
157 for test in data.keys():
158 if data[test]["parent"] in suite_name:
160 for column in table["columns"]:
162 col_data = str(data[test][column["data"].
163 split(" ")[1]]).replace('"', '""')
# `replace` is the deprecated Python 2 string.replace() function.
164 col_data = replace(col_data, "No Data",
166 if column["data"].split(" ")[1] in ("conf-history",
168 col_data = replace(col_data, " |br| ", "",
170 col_data = " |prein| {0} |preout| ".\
171 format(col_data[:-5])
172 row_lst.append('"{0}"'.format(col_data))
174 row_lst.append('"Not captured"')
175 table_lst.append(row_lst)
177 # Write the data to file
179 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
180 table["output-file-ext"])
181 logging.info("      Writing file: '{}'".format(file_name))
182 with open(file_name, "w") as file_handler:
183 file_handler.write(",".join(header) + "\n")
184 for item in table_lst:
185 file_handler.write(",".join(item) + "\n")
187 logging.info("  Done.")
# Builds a reference-vs-compare throughput comparison table (optionally with
# extra "history" columns), computes relative change, sorts into sections and
# writes CSV + pretty TXT output, appending a dot1q footnote to the TXT file.
# NOTE(review): excerpt with gaps -- the embedded line numbers skip values,
# so `try:` lines, `tbl_dict = OrderedDict()`, several `continue`s and other
# statements are not shown in this listing.
190 def table_performance_comparison(table, input_data):
191 """Generate the table(s) with algorithm: table_performance_comparison
192 specified in the specification file.
194 :param table: Table to generate.
195 :param input_data: Data to process.
196 :type table: pandas.Series
197 :type input_data: InputData
200 logging.info("  Generating the table {0} ...".
201 format(table.get("title", "")))
204 logging.info("    Creating the data set for the {0} '{1}'.".
205 format(table.get("type", ""), table.get("title", "")))
206 data = input_data.filter_data(table, continue_on_error=True)
208 # Prepare the header of the tables
210 header = ["Test case", ]
# Header metric label depends on the test kind selected by the spec.
212 if table["include-tests"] == "MRR":
213 hdr_param = "Rec Rate"
# Optional extra comparison columns driven by table["history"].
217 history = table.get("history", None)
221 ["{0} {1} [Mpps]".format(item["title"], hdr_param),
222 "{0} Stdev [Mpps]".format(item["title"])])
224 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
225 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
226 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
227 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
229 header_str = ",".join(header) + "\n"
230 except (AttributeError, KeyError) as err:
231 logging.error("The model is invalid, missing parameter: {0}".
235 # Prepare data to the table:
# Pass 1: collect reference ("ref-data") samples per normalised test name.
237 for job, builds in table["reference"]["data"].items():
238 topo = "2n-skx" if "2n-skx" in job else ""
# Python 2 only: iteritems().
240 for tst_name, tst_data in data[job][str(build)].iteritems():
# Normalise the test name: strip rate-search suffixes and collapse
# thread/core tags (e.g. 2t1c -> 1c) so variants merge into one row.
241 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
242 replace("-ndrpdr", "").replace("-pdrdisc", "").\
243 replace("-ndrdisc", "").replace("-pdr", "").\
244 replace("-ndr", "").\
245 replace("1t1c", "1c").replace("2t1c", "1c").\
246 replace("2t2c", "2c").replace("4t2c", "2c").\
247 replace("4t4c", "4c").replace("8t4c", "4c")
248 if "across topologies" in table["title"].lower():
249 tst_name_mod = tst_name_mod.replace("2n1l-", "")
250 if tbl_dict.get(tst_name_mod, None) is None:
# Display name is prefixed with the NIC model parsed from the parent
# suite name (REGEX_NIC); empty prefix when no match.
251 groups = re.search(REGEX_NIC, tst_data["parent"])
252 nic = groups.group(0) if groups else ""
253 name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
255 if "across testbeds" in table["title"].lower() or \
256 "across topologies" in table["title"].lower():
258 replace("1t1c", "1c").replace("2t1c", "1c").\
259 replace("2t2c", "2c").replace("4t2c", "2c").\
260 replace("4t4c", "4c").replace("8t4c", "4c")
261 tbl_dict[tst_name_mod] = {"name": name,
265 # TODO: Re-work when NDRPDRDISC tests are not used
266 if table["include-tests"] == "MRR":
267 tbl_dict[tst_name_mod]["ref-data"]. \
268 append(tst_data["result"]["receive-rate"].avg)
269 elif table["include-tests"] == "PDR":
270 if tst_data["type"] == "PDR":
271 tbl_dict[tst_name_mod]["ref-data"]. \
272 append(tst_data["throughput"]["value"])
273 elif tst_data["type"] == "NDRPDR":
274 tbl_dict[tst_name_mod]["ref-data"].append(
275 tst_data["throughput"]["PDR"]["LOWER"])
276 elif table["include-tests"] == "NDR":
277 if tst_data["type"] == "NDR":
278 tbl_dict[tst_name_mod]["ref-data"]. \
279 append(tst_data["throughput"]["value"])
280 elif tst_data["type"] == "NDRPDR":
281 tbl_dict[tst_name_mod]["ref-data"].append(
282 tst_data["throughput"]["NDR"]["LOWER"])
286 pass  # No data in output.xml for this test
# Pass 2: same normalisation, collecting compare ("cmp-data") samples.
288 for job, builds in table["compare"]["data"].items():
290 for tst_name, tst_data in data[job][str(build)].iteritems():
291 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
292 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
293 replace("-ndrdisc", "").replace("-pdr", ""). \
294 replace("-ndr", "").\
295 replace("1t1c", "1c").replace("2t1c", "1c").\
296 replace("2t2c", "2c").replace("4t2c", "2c").\
297 replace("4t4c", "4c").replace("8t4c", "4c")
298 if "across topologies" in table["title"].lower():
299 tst_name_mod = tst_name_mod.replace("2n1l-", "")
300 if tbl_dict.get(tst_name_mod, None) is None:
301 groups = re.search(REGEX_NIC, tst_data["parent"])
302 nic = groups.group(0) if groups else ""
303 name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
305 if "across testbeds" in table["title"].lower() or \
306 "across topologies" in table["title"].lower():
308 replace("1t1c", "1c").replace("2t1c", "1c").\
309 replace("2t2c", "2c").replace("4t2c", "2c").\
310 replace("4t4c", "4c").replace("8t4c", "4c")
311 tbl_dict[tst_name_mod] = {"name": name,
315 # TODO: Re-work when NDRPDRDISC tests are not used
316 if table["include-tests"] == "MRR":
317 tbl_dict[tst_name_mod]["cmp-data"]. \
318 append(tst_data["result"]["receive-rate"].avg)
319 elif table["include-tests"] == "PDR":
320 if tst_data["type"] == "PDR":
321 tbl_dict[tst_name_mod]["cmp-data"]. \
322 append(tst_data["throughput"]["value"])
323 elif tst_data["type"] == "NDRPDR":
324 tbl_dict[tst_name_mod]["cmp-data"].append(
325 tst_data["throughput"]["PDR"]["LOWER"])
326 elif table["include-tests"] == "NDR":
327 if tst_data["type"] == "NDR":
328 tbl_dict[tst_name_mod]["cmp-data"]. \
329 append(tst_data["throughput"]["value"])
330 elif tst_data["type"] == "NDRPDR":
331 tbl_dict[tst_name_mod]["cmp-data"].append(
332 tst_data["throughput"]["NDR"]["LOWER"])
335 except (KeyError, TypeError):
# Pass 3 (optional): fill per-item "history" sample lists, keyed by the
# history entry's title, only for tests already present in tbl_dict.
339 for job, builds in item["data"].items():
341 for tst_name, tst_data in data[job][str(build)].iteritems():
342 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
343 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
344 replace("-ndrdisc", "").replace("-pdr", ""). \
345 replace("-ndr", "").\
346 replace("1t1c", "1c").replace("2t1c", "1c").\
347 replace("2t2c", "2c").replace("4t2c", "2c").\
348 replace("4t4c", "4c").replace("8t4c", "4c")
349 if "across topologies" in table["title"].lower():
350 tst_name_mod = tst_name_mod.replace("2n1l-", "")
351 if tbl_dict.get(tst_name_mod, None) is None:
353 if tbl_dict[tst_name_mod].get("history", None) is None:
354 tbl_dict[tst_name_mod]["history"] = OrderedDict()
355 if tbl_dict[tst_name_mod]["history"].get(item["title"],
357 tbl_dict[tst_name_mod]["history"][item["title"]] = \
360 # TODO: Re-work when NDRPDRDISC tests are not used
361 if table["include-tests"] == "MRR":
362 tbl_dict[tst_name_mod]["history"][item["title"
363 ]].append(tst_data["result"]["receive-rate"].
365 elif table["include-tests"] == "PDR":
366 if tst_data["type"] == "PDR":
367 tbl_dict[tst_name_mod]["history"][
369 append(tst_data["throughput"]["value"])
370 elif tst_data["type"] == "NDRPDR":
371 tbl_dict[tst_name_mod]["history"][item[
372 "title"]].append(tst_data["throughput"][
374 elif table["include-tests"] == "NDR":
375 if tst_data["type"] == "NDR":
376 tbl_dict[tst_name_mod]["history"][
378 append(tst_data["throughput"]["value"])
379 elif tst_data["type"] == "NDRPDR":
380 tbl_dict[tst_name_mod]["history"][item[
381 "title"]].append(tst_data["throughput"][
385 except (TypeError, KeyError):
# Assemble output rows: history, ref, cmp -> mean/stdev in Mpps (values
# divided by 1e6 and rounded), "Not tested" placeholders otherwise.
390 for tst_name in tbl_dict.keys():
391 item = [tbl_dict[tst_name]["name"], ]
393 if tbl_dict[tst_name].get("history", None) is not None:
394 for hist_data in tbl_dict[tst_name]["history"].values():
396 item.append(round(mean(hist_data) / 1000000, 2))
397 item.append(round(stdev(hist_data) / 1000000, 2))
399 item.extend(["Not tested", "Not tested"])
401 item.extend(["Not tested", "Not tested"])
402 data_t = tbl_dict[tst_name]["ref-data"]
404 item.append(round(mean(data_t) / 1000000, 2))
405 item.append(round(stdev(data_t) / 1000000, 2))
407 item.extend(["Not tested", "Not tested"])
408 data_t = tbl_dict[tst_name]["cmp-data"]
410 item.append(round(mean(data_t) / 1000000, 2))
411 item.append(round(stdev(data_t) / 1000000, 2))
413 item.extend(["Not tested", "Not tested"])
# Classify the row: item[-2]/item[-4] are the cmp/ref means at this point.
414 if item[-2] == "Not tested":
416 elif item[-4] == "Not tested":
417 item.append("New in CSIT-1908")
# dot1q tests on 2n-skx get a methodology footnote instead of a delta.
418 elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
419 item.append("See footnote [1]")
422 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
423 if (len(item) == len(header)) and (item[-3] != "Not tested"):
427 # 1. New in CSIT-XXXX
# Split rows into "new", "footnote" and "delta" sections for ordering.
434 if "New in CSIT" in item[-1]:
436 elif "See footnote" in item[-1]:
439 tbl_delta.append(item)
# Sections: new/footnote rows by name ascending, delta rows by change desc.
442 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
443 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
444 tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
445 tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
447 # Put the tables together:
449 tbl_lst.extend(tbl_new)
450 tbl_lst.extend(tbl_see)
451 tbl_lst.extend(tbl_delta)
453 # Generate csv tables:
454 csv_file = "{0}.csv".format(table["output-file"])
455 with open(csv_file, "w") as file_handler:
456 file_handler.write(header_str)
458 file_handler.write(",".join([str(item) for item in test]) + "\n")
460 txt_file_name = "{0}.txt".format(table["output-file"])
461 convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Append the footnote explaining the dot1q methodology change to the TXT.
464 with open(txt_file_name, 'a') as txt_file:
465 txt_file.writelines([
467 "[1] CSIT-1908 changed test methodology of dot1q tests in "
468 "2-node testbeds, dot1q encapsulation is now used on both "
470 " Previously dot1q was used only on a single link with the "
471 "other link carrying untagged Ethernet frames. This changes "
473 " in slightly lower throughput in CSIT-1908 for these "
474 "tests. See release notes."
# Variant of table_performance_comparison that additionally filters tests by
# NIC tag (table[...]["nic"] must be in tst_data["tags"]) and strips the NIC
# token from the normalised test name via re.sub(REGEX_NIC, ...), so results
# from different NICs line up on one row. Otherwise the flow mirrors
# table_performance_comparison.
# NOTE(review): excerpt with gaps -- `try:` lines, `tbl_dict` initialisation
# and several `continue`s are omitted from this listing (embedded line
# numbers skip values).
478 def table_performance_comparison_nic(table, input_data):
479 """Generate the table(s) with algorithm: table_performance_comparison
480 specified in the specification file.
482 :param table: Table to generate.
483 :param input_data: Data to process.
484 :type table: pandas.Series
485 :type input_data: InputData
488 logging.info("  Generating the table {0} ...".
489 format(table.get("title", "")))
492 logging.info("    Creating the data set for the {0} '{1}'.".
493 format(table.get("type", ""), table.get("title", "")))
494 data = input_data.filter_data(table, continue_on_error=True)
496 # Prepare the header of the tables
498 header = ["Test case", ]
500 if table["include-tests"] == "MRR":
501 hdr_param = "Rec Rate"
505 history = table.get("history", None)
509 ["{0} {1} [Mpps]".format(item["title"], hdr_param),
510 "{0} Stdev [Mpps]".format(item["title"])])
512 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
513 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
514 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
515 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
517 header_str = ",".join(header) + "\n"
518 except (AttributeError, KeyError) as err:
519 logging.error("The model is invalid, missing parameter: {0}".
523 # Prepare data to the table:
# Pass 1: reference samples, restricted to the reference NIC tag.
525 for job, builds in table["reference"]["data"].items():
526 topo = "2n-skx" if "2n-skx" in job else ""
528 for tst_name, tst_data in data[job][str(build)].iteritems():
529 if table["reference"]["nic"] not in tst_data["tags"]:
531 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
532 replace("-ndrpdr", "").replace("-pdrdisc", "").\
533 replace("-ndrdisc", "").replace("-pdr", "").\
534 replace("-ndr", "").\
535 replace("1t1c", "1c").replace("2t1c", "1c").\
536 replace("2t2c", "2c").replace("4t2c", "2c").\
537 replace("4t4c", "4c").replace("8t4c", "4c")
# Strip the NIC token so ref and cmp tests share one dictionary key.
538 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
539 if "across topologies" in table["title"].lower():
540 tst_name_mod = tst_name_mod.replace("2n1l-", "")
541 if tbl_dict.get(tst_name_mod, None) is None:
542 name = "{0}".format("-".join(tst_data["name"].
544 if "across testbeds" in table["title"].lower() or \
545 "across topologies" in table["title"].lower():
547 replace("1t1c", "1c").replace("2t1c", "1c").\
548 replace("2t2c", "2c").replace("4t2c", "2c").\
549 replace("4t4c", "4c").replace("8t4c", "4c")
550 tbl_dict[tst_name_mod] = {"name": name,
554 # TODO: Re-work when NDRPDRDISC tests are not used
555 if table["include-tests"] == "MRR":
556 tbl_dict[tst_name_mod]["ref-data"]. \
557 append(tst_data["result"]["receive-rate"].avg)
558 elif table["include-tests"] == "PDR":
559 if tst_data["type"] == "PDR":
560 tbl_dict[tst_name_mod]["ref-data"]. \
561 append(tst_data["throughput"]["value"])
562 elif tst_data["type"] == "NDRPDR":
563 tbl_dict[tst_name_mod]["ref-data"].append(
564 tst_data["throughput"]["PDR"]["LOWER"])
565 elif table["include-tests"] == "NDR":
566 if tst_data["type"] == "NDR":
567 tbl_dict[tst_name_mod]["ref-data"]. \
568 append(tst_data["throughput"]["value"])
569 elif tst_data["type"] == "NDRPDR":
570 tbl_dict[tst_name_mod]["ref-data"].append(
571 tst_data["throughput"]["NDR"]["LOWER"])
575 pass  # No data in output.xml for this test
# Pass 2: compare samples, restricted to the compare NIC tag.
577 for job, builds in table["compare"]["data"].items():
579 for tst_name, tst_data in data[job][str(build)].iteritems():
580 if table["compare"]["nic"] not in tst_data["tags"]:
582 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
583 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
584 replace("-ndrdisc", "").replace("-pdr", ""). \
585 replace("-ndr", "").\
586 replace("1t1c", "1c").replace("2t1c", "1c").\
587 replace("2t2c", "2c").replace("4t2c", "2c").\
588 replace("4t4c", "4c").replace("8t4c", "4c")
589 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
590 if "across topologies" in table["title"].lower():
591 tst_name_mod = tst_name_mod.replace("2n1l-", "")
592 if tbl_dict.get(tst_name_mod, None) is None:
593 name = "{0}".format("-".join(tst_data["name"].
595 if "across testbeds" in table["title"].lower() or \
596 "across topologies" in table["title"].lower():
598 replace("1t1c", "1c").replace("2t1c", "1c").\
599 replace("2t2c", "2c").replace("4t2c", "2c").\
600 replace("4t4c", "4c").replace("8t4c", "4c")
601 tbl_dict[tst_name_mod] = {"name": name,
605 # TODO: Re-work when NDRPDRDISC tests are not used
606 if table["include-tests"] == "MRR":
607 tbl_dict[tst_name_mod]["cmp-data"]. \
608 append(tst_data["result"]["receive-rate"].avg)
609 elif table["include-tests"] == "PDR":
610 if tst_data["type"] == "PDR":
611 tbl_dict[tst_name_mod]["cmp-data"]. \
612 append(tst_data["throughput"]["value"])
613 elif tst_data["type"] == "NDRPDR":
614 tbl_dict[tst_name_mod]["cmp-data"].append(
615 tst_data["throughput"]["PDR"]["LOWER"])
616 elif table["include-tests"] == "NDR":
617 if tst_data["type"] == "NDR":
618 tbl_dict[tst_name_mod]["cmp-data"]. \
619 append(tst_data["throughput"]["value"])
620 elif tst_data["type"] == "NDRPDR":
621 tbl_dict[tst_name_mod]["cmp-data"].append(
622 tst_data["throughput"]["NDR"]["LOWER"])
625 except (KeyError, TypeError):
# Pass 3 (optional): history samples, filtered by each item's NIC tag.
630 for job, builds in item["data"].items():
632 for tst_name, tst_data in data[job][str(build)].iteritems():
633 if item["nic"] not in tst_data["tags"]:
635 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
636 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
637 replace("-ndrdisc", "").replace("-pdr", ""). \
638 replace("-ndr", "").\
639 replace("1t1c", "1c").replace("2t1c", "1c").\
640 replace("2t2c", "2c").replace("4t2c", "2c").\
641 replace("4t4c", "4c").replace("8t4c", "4c")
642 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
643 if "across topologies" in table["title"].lower():
644 tst_name_mod = tst_name_mod.replace("2n1l-", "")
645 if tbl_dict.get(tst_name_mod, None) is None:
647 if tbl_dict[tst_name_mod].get("history", None) is None:
648 tbl_dict[tst_name_mod]["history"] = OrderedDict()
649 if tbl_dict[tst_name_mod]["history"].get(item["title"],
651 tbl_dict[tst_name_mod]["history"][item["title"]] = \
654 # TODO: Re-work when NDRPDRDISC tests are not used
655 if table["include-tests"] == "MRR":
656 tbl_dict[tst_name_mod]["history"][item["title"
657 ]].append(tst_data["result"]["receive-rate"].
659 elif table["include-tests"] == "PDR":
660 if tst_data["type"] == "PDR":
661 tbl_dict[tst_name_mod]["history"][
663 append(tst_data["throughput"]["value"])
664 elif tst_data["type"] == "NDRPDR":
665 tbl_dict[tst_name_mod]["history"][item[
666 "title"]].append(tst_data["throughput"][
668 elif table["include-tests"] == "NDR":
669 if tst_data["type"] == "NDR":
670 tbl_dict[tst_name_mod]["history"][
672 append(tst_data["throughput"]["value"])
673 elif tst_data["type"] == "NDRPDR":
674 tbl_dict[tst_name_mod]["history"][item[
675 "title"]].append(tst_data["throughput"][
679 except (TypeError, KeyError):
# Assemble rows: mean/stdev in Mpps, "Not tested" placeholders, then
# classify (missing data / new test / dot1q footnote / numeric delta).
684 for tst_name in tbl_dict.keys():
685 item = [tbl_dict[tst_name]["name"], ]
687 if tbl_dict[tst_name].get("history", None) is not None:
688 for hist_data in tbl_dict[tst_name]["history"].values():
690 item.append(round(mean(hist_data) / 1000000, 2))
691 item.append(round(stdev(hist_data) / 1000000, 2))
693 item.extend(["Not tested", "Not tested"])
695 item.extend(["Not tested", "Not tested"])
696 data_t = tbl_dict[tst_name]["ref-data"]
698 item.append(round(mean(data_t) / 1000000, 2))
699 item.append(round(stdev(data_t) / 1000000, 2))
701 item.extend(["Not tested", "Not tested"])
702 data_t = tbl_dict[tst_name]["cmp-data"]
704 item.append(round(mean(data_t) / 1000000, 2))
705 item.append(round(stdev(data_t) / 1000000, 2))
707 item.extend(["Not tested", "Not tested"])
708 if item[-2] == "Not tested":
710 elif item[-4] == "Not tested":
711 item.append("New in CSIT-1908")
712 elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
713 item.append("See footnote [1]")
716 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
717 if (len(item) == len(header)) and (item[-3] != "Not tested"):
721 # 1. New in CSIT-XXXX
728 if "New in CSIT" in item[-1]:
730 elif "See footnote" in item[-1]:
733 tbl_delta.append(item)
# Sections: new/footnote rows by name ascending, delta rows by change desc.
736 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
737 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
738 tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
739 tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
741 # Put the tables together:
743 tbl_lst.extend(tbl_new)
744 tbl_lst.extend(tbl_see)
745 tbl_lst.extend(tbl_delta)
747 # Generate csv tables:
748 csv_file = "{0}.csv".format(table["output-file"])
749 with open(csv_file, "w") as file_handler:
750 file_handler.write(header_str)
752 file_handler.write(",".join([str(item) for item in test]) + "\n")
754 txt_file_name = "{0}.txt".format(table["output-file"])
755 convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Append the dot1q methodology footnote to the pretty TXT output.
758 with open(txt_file_name, 'a') as txt_file:
759 txt_file.writelines([
761 "[1] CSIT-1908 changed test methodology of dot1q tests in "
762 "2-node testbeds, dot1q encapsulation is now used on both "
764 " Previously dot1q was used only on a single link with the "
765 "other link carrying untagged Ethernet frames. This changes "
767 " in slightly lower throughput in CSIT-1908 for these "
768 "tests. See release notes."
# Compares the same tests run on two different NICs: samples are routed to
# "ref-data" or "cmp-data" by which NIC tag appears in tst_data["tags"], and
# the NIC token is stripped from the test name so both NICs share one row.
# NOTE(review): excerpt with gaps -- `try:` lines, `tbl_dict` initialisation
# and the non-MRR hdr_param branch are omitted from this listing.
772 def table_nics_comparison(table, input_data):
773 """Generate the table(s) with algorithm: table_nics_comparison
774 specified in the specification file.
776 :param table: Table to generate.
777 :param input_data: Data to process.
778 :type table: pandas.Series
779 :type input_data: InputData
782 logging.info("  Generating the table {0} ...".
783 format(table.get("title", "")))
786 logging.info("    Creating the data set for the {0} '{1}'.".
787 format(table.get("type", ""), table.get("title", "")))
788 data = input_data.filter_data(table, continue_on_error=True)
790 # Prepare the header of the tables
792 header = ["Test case", ]
794 if table["include-tests"] == "MRR":
795 hdr_param = "Rec Rate"
800 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
801 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
802 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
803 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
805 header_str = ",".join(header) + "\n"
806 except (AttributeError, KeyError) as err:
807 logging.error("The model is invalid, missing parameter: {0}".
811 # Prepare data to the table:
813 for job, builds in table["data"].items():
# Python 2 only: iteritems().
815 for tst_name, tst_data in data[job][str(build)].iteritems():
# Normalise: strip rate-search suffixes, collapse thread/core tags,
# then remove the NIC token so both NICs map to the same key.
816 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
817 replace("-ndrpdr", "").replace("-pdrdisc", "").\
818 replace("-ndrdisc", "").replace("-pdr", "").\
819 replace("-ndr", "").\
820 replace("1t1c", "1c").replace("2t1c", "1c").\
821 replace("2t2c", "2c").replace("4t2c", "2c").\
822 replace("4t4c", "4c").replace("8t4c", "4c")
823 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
824 if tbl_dict.get(tst_name_mod, None) is None:
825 name = "-".join(tst_data["name"].split("-")[:-1])
826 tbl_dict[tst_name_mod] = {"name": name,
# Pick the metric per the spec's include-tests setting.
830 if table["include-tests"] == "MRR":
831 result = tst_data["result"]["receive-rate"].avg
832 elif table["include-tests"] == "PDR":
833 result = tst_data["throughput"]["PDR"]["LOWER"]
834 elif table["include-tests"] == "NDR":
835 result = tst_data["throughput"]["NDR"]["LOWER"]
# Route the sample by NIC tag membership.
840 if table["reference"]["nic"] in tst_data["tags"]:
841 tbl_dict[tst_name_mod]["ref-data"].append(result)
842 elif table["compare"]["nic"] in tst_data["tags"]:
843 tbl_dict[tst_name_mod]["cmp-data"].append(result)
844 except (TypeError, KeyError) as err:
845 logging.debug("No data for {0}".format(tst_name))
846 logging.debug(repr(err))
847 # No data in output.xml for this test
# Assemble rows: mean/stdev in Mpps for each side, None placeholders,
# then a relative-change column when both means exist and ref != 0.
850 for tst_name in tbl_dict.keys():
851 item = [tbl_dict[tst_name]["name"], ]
852 data_t = tbl_dict[tst_name]["ref-data"]
854 item.append(round(mean(data_t) / 1000000, 2))
855 item.append(round(stdev(data_t) / 1000000, 2))
857 item.extend([None, None])
858 data_t = tbl_dict[tst_name]["cmp-data"]
860 item.append(round(mean(data_t) / 1000000, 2))
861 item.append(round(stdev(data_t) / 1000000, 2))
863 item.extend([None, None])
864 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
865 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
866 if len(item) == len(header):
869 # Sort the table according to the relative change
870 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
872 # Generate csv tables:
873 csv_file = "{0}.csv".format(table["output-file"])
874 with open(csv_file, "w") as file_handler:
875 file_handler.write(header_str)
877 file_handler.write(",".join([str(item) for item in test]) + "\n")
879 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
# Compares SOAK test throughput (compare side) against the corresponding
# NDR/PDR/MRR results (reference side), using relative_change_stdev to
# produce a delta with its standard deviation.
# NOTE(review): excerpt with gaps -- `try:` lines, `tbl_dict`/`tbl_lst`
# initialisation and some continuation lines are omitted from this listing.
882 def table_soak_vs_ndr(table, input_data):
883 """Generate the table(s) with algorithm: table_soak_vs_ndr
884 specified in the specification file.
886 :param table: Table to generate.
887 :param input_data: Data to process.
888 :type table: pandas.Series
889 :type input_data: InputData
892 logging.info("  Generating the table {0} ...".
893 format(table.get("title", "")))
896 logging.info("    Creating the data set for the {0} '{1}'.".
897 format(table.get("type", ""), table.get("title", "")))
898 data = input_data.filter_data(table, continue_on_error=True)
900 # Prepare the header of the table
904 "{0} Thput [Mpps]".format(table["reference"]["title"]),
905 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
906 "{0} Thput [Mpps]".format(table["compare"]["title"]),
907 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
908 "Delta [%]", "Stdev of delta [%]"]
909 header_str = ",".join(header) + "\n"
910 except (AttributeError, KeyError) as err:
911 logging.error("The model is invalid, missing parameter: {0}".
915 # Create a list of available SOAK test results:
917 for job, builds in table["compare"]["data"].items():
# Python 2 only: iteritems().
919 for tst_name, tst_data in data[job][str(build)].iteritems():
920 if tst_data["type"] == "SOAK":
# "-soak" suffix is stripped so the name matches the NDR variant.
921 tst_name_mod = tst_name.replace("-soak", "")
922 if tbl_dict.get(tst_name_mod, None) is None:
923 groups = re.search(REGEX_NIC, tst_data["parent"])
924 nic = groups.group(0) if groups else ""
925 name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
927 tbl_dict[tst_name_mod] = {
933 tbl_dict[tst_name_mod]["cmp-data"].append(
934 tst_data["throughput"]["LOWER"])
935 except (KeyError, TypeError):
937 tests_lst = tbl_dict.keys()
939 # Add corresponding NDR test results:
# Only tests that already have a SOAK entry (tests_lst) get ref-data.
940 for job, builds in table["reference"]["data"].items():
942 for tst_name, tst_data in data[job][str(build)].iteritems():
943 tst_name_mod = tst_name.replace("-ndrpdr", "").\
945 if tst_name_mod in tests_lst:
947 if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
948 if table["include-tests"] == "MRR":
949 result = tst_data["result"]["receive-rate"].avg
950 elif table["include-tests"] == "PDR":
951 result = tst_data["throughput"]["PDR"]["LOWER"]
952 elif table["include-tests"] == "NDR":
953 result = tst_data["throughput"]["NDR"]["LOWER"]
956 if result is not None:
957 tbl_dict[tst_name_mod]["ref-data"].append(
959 except (KeyError, TypeError):
# Assemble rows: ref and cmp mean/stdev in Mpps, then delta and stdev of
# delta from relative_change_stdev when both means are truthy.
963 for tst_name in tbl_dict.keys():
964 item = [tbl_dict[tst_name]["name"], ]
965 data_r = tbl_dict[tst_name]["ref-data"]
967 data_r_mean = mean(data_r)
968 item.append(round(data_r_mean / 1000000, 2))
969 data_r_stdev = stdev(data_r)
970 item.append(round(data_r_stdev / 1000000, 2))
974 item.extend([None, None])
975 data_c = tbl_dict[tst_name]["cmp-data"]
977 data_c_mean = mean(data_c)
978 item.append(round(data_c_mean / 1000000, 2))
979 data_c_stdev = stdev(data_c)
980 item.append(round(data_c_stdev / 1000000, 2))
984 item.extend([None, None])
985 if data_r_mean and data_c_mean:
986 delta, d_stdev = relative_change_stdev(
987 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
988 item.append(round(delta, 2))
989 item.append(round(d_stdev, 2))
992 # Sort the table according to the relative change
993 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
995 # Generate csv tables:
996 csv_file = "{0}.csv".format(table["output-file"])
997 with open(csv_file, "w") as file_handler:
998 file_handler.write(header_str)
1000 file_handler.write(",".join([str(item) for item in test]) + "\n")
1002 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
1005 def table_performance_trending_dashboard(table, input_data):
1006 """Generate the table(s) with algorithm:
1007 table_performance_trending_dashboard
1008 specified in the specification file.
1010 :param table: Table to generate.
1011 :param input_data: Data to process.
1012 :type table: pandas.Series
1013 :type input_data: InputData
1016 logging.info(" Generating the table {0} ...".
1017 format(table.get("title", "")))
1019 # Transform the data
1020 logging.info(" Creating the data set for the {0} '{1}'.".
1021 format(table.get("type", ""), table.get("title", "")))
1022 data = input_data.filter_data(table, continue_on_error=True)
1024 # Prepare the header of the tables
1025 header = ["Test Case",
1027 "Short-Term Change [%]",
1028 "Long-Term Change [%]",
1032 header_str = ",".join(header) + "\n"
1034 # Prepare data to the table:
# Collect the per-build receive-rate result for every test; the display
# name is prefixed with the NIC model extracted from the parent suite
# name via REGEX_NIC.
1036 for job, builds in table["data"].items():
1037 for build in builds:
1038 for tst_name, tst_data in data[job][str(build)].iteritems():
# Tests on the (lower-cased) ignore-list are skipped.
1039 if tst_name.lower() in table.get("ignore-list", list()):
1041 if tbl_dict.get(tst_name, None) is None:
1042 groups = re.search(REGEX_NIC, tst_data["parent"])
1045 nic = groups.group(0)
1046 tbl_dict[tst_name] = {
1047 "name": "{0}-{1}".format(nic, tst_data["name"]),
1048 "data": OrderedDict()}
1050 tbl_dict[tst_name]["data"][str(build)] = \
1051 tst_data["result"]["receive-rate"]
1052 except (TypeError, KeyError):
1053 pass # No data in output.xml for this test
# Classify each test's time series and derive short/long-term changes.
1056 for tst_name in tbl_dict.keys():
1057 data_t = tbl_dict[tst_name]["data"]
1061 classification_lst, avgs = classify_anomalies(data_t)
# Both trend windows are capped by the number of available samples.
1063 win_size = min(len(data_t), table["window"])
1064 long_win_size = min(len(data_t), table["long-trend-window"])
1068 [x for x in avgs[-long_win_size:-win_size]
# Trend average one short-term window ago, clamped to the series start.
1073 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Short-term relative change in percent; nan when either input is nan
# or the reference average is zero (avoids division by zero).
1075 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1076 rel_change_last = nan
1078 rel_change_last = round(
1079 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
# Long-term relative change against the long-window maximum, percent.
1081 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1082 rel_change_long = nan
1084 rel_change_long = round(
1085 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1087 if classification_lst:
# Rows without any usable statistic are not added to the table.
1088 if isnan(rel_change_last) and isnan(rel_change_long):
1090 if (isnan(last_avg) or
1091 isnan(rel_change_last) or
1092 isnan(rel_change_long)):
# Throughput column is reported in Mpps (raw rate divided by 1e6).
1095 [tbl_dict[tst_name]["name"],
1096 round(last_avg / 1000000, 2),
# Counts of regressions/progressions within the short-term window.
1099 classification_lst[-win_size:].count("regression"),
1100 classification_lst[-win_size:].count("progression")])
# Pre-sort alphabetically by test name ...
1102 tbl_lst.sort(key=lambda rel: rel[0])
# ... then bucket by regression count (descending) and, inside each
# bucket, by progression count (descending); each bucket is finally
# ordered by short-term change ascending, so biggest drops come first.
1105 for nrr in range(table["window"], -1, -1):
1106 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1107 for nrp in range(table["window"], -1, -1):
1108 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1109 tbl_out.sort(key=lambda rel: rel[2])
1110 tbl_sorted.extend(tbl_out)
# Write the dashboard as CSV, then convert it to a pretty text table.
1112 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1114 logging.info(" Writing file: '{0}'".format(file_name))
1115 with open(file_name, "w") as file_handler:
1116 file_handler.write(header_str)
1117 for test in tbl_sorted:
1118 file_handler.write(",".join([str(item) for item in test]) + '\n')
1120 txt_file_name = "{0}.txt".format(table["output-file"])
1121 logging.info(" Writing file: '{0}'".format(txt_file_name))
1122 convert_csv_to_pretty_txt(file_name, txt_file_name)
1125 def _generate_url(base, testbed, test_name):
1126 """Generate URL to a trending plot from the name of the test case.
1128 :param base: The base part of URL common to all test cases.
1129 :param testbed: The testbed used for testing.
1130 :param test_name: The name of the test case.
1133 :type test_name: str
1134 :returns: The URL to the plot with the trending data for the given test
# Select the trending page (file_name) and the feature suffix of the
# anchor by matching substrings of the test name.
# NOTE(review): several assignments in this chain are elided in this
# listing (gaps in the embedded numbering) - confirm against the full file.
1144 if "lbdpdk" in test_name or "lbvpp" in test_name:
1145 file_name = "link_bonding"
1147 elif "114b" in test_name and "vhost" in test_name:
1150 elif "testpmd" in test_name or "l3fwd" in test_name:
1153 elif "memif" in test_name:
1154 file_name = "container_memif"
1157 elif "srv6" in test_name:
1160 elif "vhost" in test_name:
1161 if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
1162 file_name = "vm_vhost_l2"
1163 if "114b" in test_name:
1165 elif "l2xcbase" in test_name and "x520" in test_name:
1166 feature = "-base-l2xc"
1167 elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
1168 feature = "-base-l2bd"
1171 elif "ip4base" in test_name:
1172 file_name = "vm_vhost_ip4"
1175 elif "ipsecbasetnlsw" in test_name:
1176 file_name = "ipsecsw"
1177 feature = "-base-scale"
1179 elif "ipsec" in test_name:
1181 feature = "-base-scale"
# "hw-"/"sw-" in the name selects the HW- vs SW-crypto IPsec page.
1182 if "hw-" in test_name:
1183 file_name = "ipsechw"
1184 elif "sw-" in test_name:
1185 file_name = "ipsecsw"
1186 if "-int-" in test_name:
1187 feature = "-base-scale-int"
1188 elif "tnl" in test_name:
1189 feature = "-base-scale-tnl"
1191 elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
1192 file_name = "ip4_tunnels"
1195 elif "ip4base" in test_name or "ip4scale" in test_name:
1197 if "xl710" in test_name:
1198 feature = "-base-scale-features"
1199 elif "iacl" in test_name:
1200 feature = "-features-iacl"
1201 elif "oacl" in test_name:
1202 feature = "-features-oacl"
1203 elif "snat" in test_name or "cop" in test_name:
1204 feature = "-features"
1206 feature = "-base-scale"
1208 elif "ip6base" in test_name or "ip6scale" in test_name:
1210 feature = "-base-scale"
1212 elif "l2xcbase" in test_name or "l2xcscale" in test_name \
1213 or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
1214 or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
1216 if "macip" in test_name:
1217 feature = "-features-macip"
1218 elif "iacl" in test_name:
1219 feature = "-features-iacl"
1220 elif "oacl" in test_name:
1221 feature = "-features-oacl"
1223 feature = "-base-scale"
# NIC-model part of the URL (the assignments themselves are elided here).
1225 if "x520" in test_name:
1227 elif "x710" in test_name:
1229 elif "xl710" in test_name:
1231 elif "xxv710" in test_name:
1233 elif "vic1227" in test_name:
1235 elif "vic1385" in test_name:
1237 elif "x553" in test_name:
# Frame-size part of the anchor (assignments elided here).
1243 if "64b" in test_name:
1245 elif "78b" in test_name:
1247 elif "imix" in test_name:
1249 elif "9000b" in test_name:
1251 elif "1518b" in test_name:
1253 elif "114b" in test_name:
1257 anchor += framesize + '-'
# Thread/core-combination part of the anchor (assignments elided here).
1259 if "1t1c" in test_name:
1261 elif "2t2c" in test_name:
1263 elif "4t4c" in test_name:
1265 elif "2t1c" in test_name:
1267 elif "4t2c" in test_name:
1269 elif "8t4c" in test_name:
# Compose the final URL; "-int"/"-tnl" are stripped from the page-name
# part of the path but kept in the trailing feature suffix after anchor.
1272 return url + file_name + '-' + testbed + '-' + nic + framesize + \
1273 feature.replace("-int", "").replace("-tnl", "") + anchor + feature
1276 def table_performance_trending_dashboard_html(table, input_data):
1277 """Generate the table(s) with algorithm:
1278 table_performance_trending_dashboard_html specified in the specification
1281 :param table: Table to generate.
1282 :param input_data: Data to process.
1284 :type input_data: InputData
# The testbed name is needed to build links to trending plots; without it
# an error is logged (the early return is elided in this listing).
1287 testbed = table.get("testbed", None)
1289 logging.error("The testbed is not defined for the table '{0}'.".
1290 format(table.get("title", "")))
1293 logging.info(" Generating the table {0} ...".
1294 format(table.get("title", "")))
# Read the previously generated dashboard CSV into a list of rows.
# NOTE(review): 'rb' with csv.reader is the Python 2 idiom; Python 3
# would need text mode with newline="".
1297 with open(table["input-file"], 'rb') as csv_file:
1298 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1299 csv_lst = [item for item in csv_content]
1301 logging.warning("The input file is not defined.")
1303 except csv.Error as err:
1304 logging.warning("Not possible to process the file '{0}'.\n{1}".
1305 format(table["input-file"], err))
# Build the HTML table with ElementTree.
1309 dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row from the first CSV line; first column left-aligned.
1312 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
1313 for idx, item in enumerate(csv_lst[0]):
1314 alignment = "left" if idx == 0 else "center"
1315 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Two alternating shades per category to stripe consecutive rows.
1319 colors = {"regression": ("#ffcccc", "#ff9999"),
1320 "progression": ("#c6ecc6", "#9fdf9f"),
1321 "normal": ("#e9f1fb", "#d4e4f7")}
1322 for r_idx, row in enumerate(csv_lst[1:]):
# Row category (the conditions selecting it are elided here).
1324 color = "regression"
1326 color = "progression"
1329 background = colors[color][r_idx % 2]
1330 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
1333 for c_idx, item in enumerate(row):
1334 alignment = "left" if c_idx == 0 else "center"
1335 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# Turn the test-name cell into a link to its trending plot.
1338 url = _generate_url("../trending/", testbed, item)
1339 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Emit the table as an rST ".. raw:: html" block.
1344 with open(table["output-file"], 'w') as html_file:
1345 logging.info(" Writing file: '{0}'".format(table["output-file"]))
1346 html_file.write(".. raw:: html\n\n\t")
1347 html_file.write(ET.tostring(dashboard))
1348 html_file.write("\n\t<p><br><br></p>\n")
1350 logging.warning("The output file is not defined.")
1354 def table_last_failed_tests(table, input_data):
1355 """Generate the table(s) with algorithm: table_last_failed_tests
1356 specified in the specification file.
1358 :param table: Table to generate.
1359 :param input_data: Data to process.
1360 :type table: pandas.Series
1361 :type input_data: InputData
1364 logging.info(" Generating the table {0} ...".
1365 format(table.get("title", "")))
1367 # Transform the data
1368 logging.info(" Creating the data set for the {0} '{1}'.".
1369 format(table.get("type", ""), table.get("title", "")))
1370 data = input_data.filter_data(table, continue_on_error=True)
# Nothing to do when filtering produced no data.
1372 if data is None or data.empty:
1373 logging.warn(" No data for the {0} '{1}'.".
1374 format(table.get("type", ""), table.get("title", "")))
# For each build: record the build id, the VPP version from metadata,
# then the NIC-prefixed names of all failed tests.
1378 for job, builds in table["data"].items():
1379 for build in builds:
1382 version = input_data.metadata(job, build).get("version", "")
1384 logging.error("Data for {job}: {build} is not present.".
1385 format(job=job, build=build))
1387 tbl_list.append(build)
1388 tbl_list.append(version)
1389 for tst_name, tst_data in data[job][build].iteritems():
# Only tests with status FAIL are listed.
1390 if tst_data["status"] != "FAIL":
1392 groups = re.search(REGEX_NIC, tst_data["parent"])
1395 nic = groups.group(0)
1396 tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))
# Plain-text output, one item per line.
1398 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1399 logging.info(" Writing file: '{0}'".format(file_name))
1400 with open(file_name, "w") as file_handler:
1401 for test in tbl_list:
1402 file_handler.write(test + '\n')
1405 def table_failed_tests(table, input_data):
1406 """Generate the table(s) with algorithm: table_failed_tests
1407 specified in the specification file.
1409 :param table: Table to generate.
1410 :param input_data: Data to process.
1411 :type table: pandas.Series
1412 :type input_data: InputData
1415 logging.info(" Generating the table {0} ...".
1416 format(table.get("title", "")))
1418 # Transform the data
1419 logging.info(" Creating the data set for the {0} '{1}'.".
1420 format(table.get("type", ""), table.get("title", "")))
1421 data = input_data.filter_data(table, continue_on_error=True)
1423 # Prepare the header of the tables
1424 header = ["Test Case",
1426 "Last Failure [Time]",
1427 "Last Failure [VPP-Build-Id]",
1428 "Last Failure [CSIT-Job-Build-Id]"]
1430 # Generate the data for the table according to the model in the table
# Only builds generated within the last "window" days (default 7) are
# taken into account.
1434 timeperiod = timedelta(int(table.get("window", 7)))
1437 for job, builds in table["data"].items():
1438 for build in builds:
1440 for tst_name, tst_data in data[job][build].iteritems():
# Tests on the (lower-cased) ignore-list are skipped.
1441 if tst_name.lower() in table.get("ignore-list", list()):
1443 if tbl_dict.get(tst_name, None) is None:
1444 groups = re.search(REGEX_NIC, tst_data["parent"])
1447 nic = groups.group(0)
1448 tbl_dict[tst_name] = {
1449 "name": "{0}-{1}".format(nic, tst_data["name"]),
1450 "data": OrderedDict()}
# Keep only builds whose "generated" metadata timestamp falls
# inside the time window.
1452 generated = input_data.metadata(job, build).\
1453 get("generated", "")
1456 then = dt.strptime(generated, "%Y%m%d %H:%M")
1457 if (now - then) <= timeperiod:
1458 tbl_dict[tst_name]["data"][build] = (
1461 input_data.metadata(job, build).get("version", ""),
1463 except (TypeError, KeyError) as err:
1464 logging.warning("tst_name: {} - err: {}".
1465 format(tst_name, repr(err)))
# Count failures per test and remember the most recent failure's
# timestamp, VPP build and CSIT build (val layout: status, date,
# vpp build, csit build - partially elided in this listing).
1469 for tst_data in tbl_dict.values():
1471 for val in tst_data["data"].values():
1472 if val[0] == "FAIL":
1474 fails_last_date = val[1]
1475 fails_last_vpp = val[2]
1476 fails_last_csit = val[3]
1478 max_fails = fails_nr if fails_nr > max_fails else max_fails
1479 tbl_lst.append([tst_data["name"],
1483 "mrr-daily-build-{0}".format(fails_last_csit)])
# Pre-sort by column index 2 descending (last-failure column), then
# bucket by failure count (column index 1) in descending order.
1485 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1487 for nrf in range(max_fails, -1, -1):
1488 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1489 tbl_sorted.extend(tbl_fails)
1490 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
# Write the CSV, then a pretty text rendering of the same table.
1492 logging.info(" Writing file: '{0}'".format(file_name))
1493 with open(file_name, "w") as file_handler:
1494 file_handler.write(",".join(header) + "\n")
1495 for test in tbl_sorted:
1496 file_handler.write(",".join([str(item) for item in test]) + '\n')
1498 txt_file_name = "{0}.txt".format(table["output-file"])
1499 logging.info(" Writing file: '{0}'".format(txt_file_name))
1500 convert_csv_to_pretty_txt(file_name, txt_file_name)
1503 def table_failed_tests_html(table, input_data):
1504 """Generate the table(s) with algorithm: table_failed_tests_html
1505 specified in the specification file.
1507 :param table: Table to generate.
1508 :param input_data: Data to process.
1509 :type table: pandas.Series
1510 :type input_data: InputData
# Links to trending plots require the testbed name; without it an error
# is logged (the early return is elided in this listing).
1513 testbed = table.get("testbed", None)
1515 logging.error("The testbed is not defined for the table '{0}'.".
1516 format(table.get("title", "")))
1519 logging.info(" Generating the table {0} ...".
1520 format(table.get("title", "")))
# Read the CSV produced by table_failed_tests.
# NOTE(review): 'rb' with csv.reader is the Python 2 idiom; Python 3
# would need text mode with newline="".
1523 with open(table["input-file"], 'rb') as csv_file:
1524 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1525 csv_lst = [item for item in csv_content]
1527 logging.warning("The input file is not defined.")
1529 except csv.Error as err:
1530 logging.warning("Not possible to process the file '{0}'.\n{1}".
1531 format(table["input-file"], err))
# Build the HTML table with ElementTree.
1535 failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row from the first CSV line; first column left-aligned.
1538 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
1539 for idx, item in enumerate(csv_lst[0]):
1540 alignment = "left" if idx == 0 else "center"
1541 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Data rows alternate between two background shades.
1545 colors = ("#e9f1fb", "#d4e4f7")
1546 for r_idx, row in enumerate(csv_lst[1:]):
1547 background = colors[r_idx % 2]
1548 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
1551 for c_idx, item in enumerate(row):
1552 alignment = "left" if c_idx == 0 else "center"
1553 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# Turn the test-name cell into a link to its trending plot.
1556 url = _generate_url("../trending/", testbed, item)
1557 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Emit the table as an rST ".. raw:: html" block.
1562 with open(table["output-file"], 'w') as html_file:
1563 logging.info(" Writing file: '{0}'".format(table["output-file"]))
1564 html_file.write(".. raw:: html\n\n\t")
1565 html_file.write(ET.tostring(failed_tests))
1566 html_file.write("\n\t<p><br><br></p>\n")
1568 logging.warning("The output file is not defined.")