1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30 convert_csv_to_pretty_txt, relative_change_stdev
# Pre-compiled pattern extracting a NIC designator (e.g. "10ge2p1x520")
# from a test/suite "parent" string: digits + "ge" + port digit + "p" +
# digit + model letters/digits. Used by the comparison/dashboard tables
# below to prefix or strip the NIC part of test names.
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
# Entry point: dispatch every table in the specification to its generator
# function, looked up by name via eval().
# NOTE(review): this excerpt is gap-sampled (original lines 38, 42-44, 47
# are missing here, including the `try:` opener for the except at line 49).
# NOTE(review): eval() on table["algorithm"] executes arbitrary text from
# the specification file — acceptable only if the spec is trusted input.
36 def generate_tables(spec, data):
37 """Generate all tables specified in the specification file.
39 :param spec: Specification read from the specification file.
40 :param data: Data to process.
41 :type spec: Specification
45 logging.info("Generating the tables ...")
46 for table in spec.tables:
# Resolve the generator function by its name from the spec and call it.
48 eval(table["algorithm"])(table, data)
# A NameError most likely means the spec names a generator that is not
# defined in this module; log and continue with the remaining tables.
49 except NameError as err:
50 logging.error("Probably algorithm '{alg}' is not defined: {err}".
51 format(alg=table["algorithm"], err=repr(err)))
# Write one CSV file per suite with the detailed per-test columns declared
# in table["columns"] (column["data"] is a space-separated spec string whose
# second token is the key to read from the test data).
# NOTE(review): gap-sampled excerpt — initializations (e.g. of `header`,
# `table_lst`, `row_lst`), `try:` openers and `else:` branches fall in the
# missing original lines; read against the full file before editing.
# NOTE(review): Python 2 idioms throughout: `dict.keys()[0]`, `iteritems()`,
# and the deprecated `string.replace` function imported at module top.
55 def table_details(table, input_data):
56 """Generate the table(s) with algorithm: table_detailed_test_results
57 specified in the specification file.
59 :param table: Table to generate.
60 :param input_data: Data to process.
61 :type table: pandas.Series
62 :type input_data: InputData
65 logging.info("  Generating the table {0} ...".
66 format(table.get("title", "")))
69 logging.info("    Creating the data set for the {0} '{1}'.".
70 format(table.get("type", ""), table.get("title", "")))
71 data = input_data.filter_data(table)
73 # Prepare the header of the tables
# Quote each title and escape embedded quotes CSV-style ("" for ").
75 for column in table["columns"]:
76 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
78 # Generate the data for the table according to the model in the table
# Only the first job and its first build are used as the data source.
80 job = table["data"].keys()[0]
81 build = str(table["data"][job][0])
83 suites = input_data.suites(job, build)
85 logging.error("  No data available. The table will not be generated.")
88 for suite_longname, suite in suites.iteritems():
90 suite_name = suite["name"]
# Collect every test whose parent suite matches the current suite.
92 for test in data[job][build].keys():
93 if data[job][build][test]["parent"] in suite_name:
95 for column in table["columns"]:
# column["data"] second token selects the per-test field to render.
97 col_data = str(data[job][build][test][column["data"].
98 split(" ")[1]]).replace('"', '""')
99 if column["data"].split(" ")[1] in ("conf-history",
# Strip rST line-break markers and wrap in preformatted markers;
# [:-5] drops a trailing marker fragment left by the substitution.
101 col_data = replace(col_data, " |br| ", "",
103 col_data = " |prein| {0} |preout| ".\
104 format(col_data[:-5])
105 row_lst.append('"{0}"'.format(col_data))
107 row_lst.append("No data")
108 table_lst.append(row_lst)
110 # Write the data to file
# One output file per suite: <output-file>_<suite><ext>.
112 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
113 table["output-file-ext"])
114 logging.info("      Writing file: '{}'".format(file_name))
115 with open(file_name, "w") as file_handler:
116 file_handler.write(",".join(header) + "\n")
117 for item in table_lst:
118 file_handler.write(",".join(item) + "\n")
120 logging.info("  Done.")
# Same per-suite detailed CSV output as table_details, but operating on
# data merged across jobs/builds (input_data.merge_data) instead of a
# single job/build, and emitting '"Not captured"' instead of "No data"
# for missing fields.
# NOTE(review): gap-sampled excerpt — initializations, `try:` openers and
# `else:` branches fall in the missing original lines.
123 def table_merged_details(table, input_data):
124 """Generate the table(s) with algorithm: table_merged_details
125 specified in the specification file.
127 :param table: Table to generate.
128 :param input_data: Data to process.
129 :type table: pandas.Series
130 :type input_data: InputData
133 logging.info("  Generating the table {0} ...".
134 format(table.get("title", "")))
137 logging.info("    Creating the data set for the {0} '{1}'.".
138 format(table.get("type", ""), table.get("title", "")))
139 data = input_data.filter_data(table)
140 data = input_data.merge_data(data)
141 data.sort_index(inplace=True)
143 logging.info("    Creating the data set for the {0} '{1}'.".
144 format(table.get("type", ""), table.get("title", "")))
145 suites = input_data.filter_data(table, data_set="suites")
146 suites = input_data.merge_data(suites)
148 # Prepare the header of the tables
# Quote each title and escape embedded quotes CSV-style ("" for ").
150 for column in table["columns"]:
151 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
153 for _, suite in suites.iteritems():
155 suite_name = suite["name"]
157 for test in data.keys():
158 if data[test]["parent"] in suite_name:
160 for column in table["columns"]:
# column["data"] second token selects the per-test field to render.
162 col_data = str(data[test][column["data"].
163 split(" ")[1]]).replace('"', '""')
# Deprecated Python 2 string.replace() imported at module top.
164 col_data = replace(col_data, "No Data",
166 if column["data"].split(" ")[1] in ("conf-history",
168 col_data = replace(col_data, " |br| ", "",
# [:-5] drops a trailing marker fragment left by the substitution.
170 col_data = " |prein| {0} |preout| ".\
171 format(col_data[:-5])
172 row_lst.append('"{0}"'.format(col_data))
174 row_lst.append('"Not captured"')
175 table_lst.append(row_lst)
177 # Write the data to file
# One output file per suite: <output-file>_<suite><ext>.
179 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
180 table["output-file-ext"])
181 logging.info("      Writing file: '{}'".format(file_name))
182 with open(file_name, "w") as file_handler:
183 file_handler.write(",".join(header) + "\n")
184 for item in table_lst:
185 file_handler.write(",".join(item) + "\n")
187 logging.info("  Done.")
# Build a CSV/TXT comparison of "reference" vs "compare" result sets (plus
# optional "history" columns): per test, mean and stdev in Mpps for each
# set and a relative-change delta column; rows are grouped (new tests,
# footnoted tests, delta-sorted rest) before writing.
# NOTE(review): gap-sampled excerpt — `try:` openers, `else:` branches and
# several initializations (e.g. `tbl_dict`, per-row dict values at line
# 261) fall in missing original lines; consult the full file before edits.
190 def table_performance_comparison(table, input_data):
191 """Generate the table(s) with algorithm: table_performance_comparison
192 specified in the specification file.
194 :param table: Table to generate.
195 :param input_data: Data to process.
196 :type table: pandas.Series
197 :type input_data: InputData
200 logging.info("  Generating the table {0} ...".
201 format(table.get("title", "")))
204 logging.info("    Creating the data set for the {0} '{1}'.".
205 format(table.get("type", ""), table.get("title", "")))
206 data = input_data.filter_data(table, continue_on_error=True)
208 # Prepare the header of the tables
210 header = ["Test case", ]
# Column label depends on the test type being summarized.
212 if table["include-tests"] == "MRR":
213 hdr_param = "Rec Rate"
217 history = table.get("history", None)
221 ["{0} {1} [Mpps]".format(item["title"], hdr_param),
222 "{0} Stdev [Mpps]".format(item["title"])])
224 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
225 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
226 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
227 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
229 header_str = ",".join(header) + "\n"
230 except (AttributeError, KeyError) as err:
231 logging.error("The model is invalid, missing parameter: {0}".
235 # Prepare data to the table:
# Pass 1: accumulate "ref-data" samples from the reference job/builds.
237 for job, builds in table["reference"]["data"].items():
# `topo` flags 2n-skx jobs; used later for the dot1q footnote.
238 topo = "2n-skx" if "2n-skx" in job else ""
240 for tst_name, tst_data in data[job][str(build)].iteritems():
# Normalize the test name: drop rate-search suffixes and collapse
# thread/core markers (NtMc -> Mc) so variants map to one key.
241 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
242 replace("-ndrpdr", "").replace("-pdrdisc", "").\
243 replace("-ndrdisc", "").replace("-pdr", "").\
244 replace("-ndr", "").\
245 replace("1t1c", "1c").replace("2t1c", "1c").\
246 replace("2t2c", "2c").replace("4t2c", "2c").\
247 replace("4t4c", "4c").replace("8t4c", "4c")
248 if "across topologies" in table["title"].lower():
249 tst_name_mod = tst_name_mod.replace("2n1l-", "")
250 if tbl_dict.get(tst_name_mod, None) is None:
# Prefix the display name with the NIC parsed from the parent suite.
251 groups = re.search(REGEX_NIC, tst_data["parent"])
252 nic = groups.group(0) if groups else ""
253 name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
255 if "across testbeds" in table["title"].lower() or \
256 "across topologies" in table["title"].lower():
258 replace("1t1c", "1c").replace("2t1c", "1c").\
259 replace("2t2c", "2c").replace("4t2c", "2c").\
260 replace("4t4c", "4c").replace("8t4c", "4c")
261 tbl_dict[tst_name_mod] = {"name": name,
265 # TODO: Re-work when NDRPDRDISC tests are not used
# Pick the sample value according to the configured test kind.
266 if table["include-tests"] == "MRR":
267 tbl_dict[tst_name_mod]["ref-data"]. \
268 append(tst_data["result"]["receive-rate"].avg)
269 elif table["include-tests"] == "PDR":
270 if tst_data["type"] == "PDR":
271 tbl_dict[tst_name_mod]["ref-data"]. \
272 append(tst_data["throughput"]["value"])
273 elif tst_data["type"] == "NDRPDR":
274 tbl_dict[tst_name_mod]["ref-data"].append(
275 tst_data["throughput"]["PDR"]["LOWER"])
276 elif table["include-tests"] == "NDR":
277 if tst_data["type"] == "NDR":
278 tbl_dict[tst_name_mod]["ref-data"]. \
279 append(tst_data["throughput"]["value"])
280 elif tst_data["type"] == "NDRPDR":
281 tbl_dict[tst_name_mod]["ref-data"].append(
282 tst_data["throughput"]["NDR"]["LOWER"])
286 pass  # No data in output.xml for this test
# Pass 2: accumulate "cmp-data" samples, same normalization as pass 1.
288 for job, builds in table["compare"]["data"].items():
290 for tst_name, tst_data in data[job][str(build)].iteritems():
291 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
292 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
293 replace("-ndrdisc", "").replace("-pdr", ""). \
294 replace("-ndr", "").\
295 replace("1t1c", "1c").replace("2t1c", "1c").\
296 replace("2t2c", "2c").replace("4t2c", "2c").\
297 replace("4t4c", "4c").replace("8t4c", "4c")
298 if "across topologies" in table["title"].lower():
299 tst_name_mod = tst_name_mod.replace("2n1l-", "")
300 if tbl_dict.get(tst_name_mod, None) is None:
301 groups = re.search(REGEX_NIC, tst_data["parent"])
302 nic = groups.group(0) if groups else ""
303 name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
305 if "across testbeds" in table["title"].lower() or \
306 "across topologies" in table["title"].lower():
308 replace("1t1c", "1c").replace("2t1c", "1c").\
309 replace("2t2c", "2c").replace("4t2c", "2c").\
310 replace("4t4c", "4c").replace("8t4c", "4c")
311 tbl_dict[tst_name_mod] = {"name": name,
315 # TODO: Re-work when NDRPDRDISC tests are not used
316 if table["include-tests"] == "MRR":
317 tbl_dict[tst_name_mod]["cmp-data"]. \
318 append(tst_data["result"]["receive-rate"].avg)
319 elif table["include-tests"] == "PDR":
320 if tst_data["type"] == "PDR":
321 tbl_dict[tst_name_mod]["cmp-data"]. \
322 append(tst_data["throughput"]["value"])
323 elif tst_data["type"] == "NDRPDR":
324 tbl_dict[tst_name_mod]["cmp-data"].append(
325 tst_data["throughput"]["PDR"]["LOWER"])
326 elif table["include-tests"] == "NDR":
327 if tst_data["type"] == "NDR":
328 tbl_dict[tst_name_mod]["cmp-data"]. \
329 append(tst_data["throughput"]["value"])
330 elif tst_data["type"] == "NDRPDR":
331 tbl_dict[tst_name_mod]["cmp-data"].append(
332 tst_data["throughput"]["NDR"]["LOWER"])
335 except (KeyError, TypeError):
# Pass 3: optional historical result sets, keyed by each item's title.
339 for job, builds in item["data"].items():
341 for tst_name, tst_data in data[job][str(build)].iteritems():
342 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
343 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
344 replace("-ndrdisc", "").replace("-pdr", ""). \
345 replace("-ndr", "").\
346 replace("1t1c", "1c").replace("2t1c", "1c").\
347 replace("2t2c", "2c").replace("4t2c", "2c").\
348 replace("4t4c", "4c").replace("8t4c", "4c")
349 if "across topologies" in table["title"].lower():
350 tst_name_mod = tst_name_mod.replace("2n1l-", "")
351 if tbl_dict.get(tst_name_mod, None) is None:
353 if tbl_dict[tst_name_mod].get("history", None) is None:
354 tbl_dict[tst_name_mod]["history"] = OrderedDict()
355 if tbl_dict[tst_name_mod]["history"].get(item["title"],
357 tbl_dict[tst_name_mod]["history"][item["title"]] = \
360 # TODO: Re-work when NDRPDRDISC tests are not used
361 if table["include-tests"] == "MRR":
362 tbl_dict[tst_name_mod]["history"][item["title"
363 ]].append(tst_data["result"]["receive-rate"].
365 elif table["include-tests"] == "PDR":
366 if tst_data["type"] == "PDR":
367 tbl_dict[tst_name_mod]["history"][
369 append(tst_data["throughput"]["value"])
370 elif tst_data["type"] == "NDRPDR":
371 tbl_dict[tst_name_mod]["history"][item[
372 "title"]].append(tst_data["throughput"][
374 elif table["include-tests"] == "NDR":
375 if tst_data["type"] == "NDR":
376 tbl_dict[tst_name_mod]["history"][
378 append(tst_data["throughput"]["value"])
379 elif tst_data["type"] == "NDRPDR":
380 tbl_dict[tst_name_mod]["history"][item[
381 "title"]].append(tst_data["throughput"][
385 except (TypeError, KeyError):
# Assemble one output row per test: history, ref, cmp means/stdevs
# (converted to Mpps), then a status/delta column.
390 for tst_name in tbl_dict.keys():
391 item = [tbl_dict[tst_name]["name"], ]
393 if tbl_dict[tst_name].get("history", None) is not None:
394 for hist_data in tbl_dict[tst_name]["history"].values():
396 item.append(round(mean(hist_data) / 1000000, 2))
397 item.append(round(stdev(hist_data) / 1000000, 2))
399 item.extend(["Not tested", "Not tested"])
401 item.extend(["Not tested", "Not tested"])
402 data_t = tbl_dict[tst_name]["ref-data"]
404 item.append(round(mean(data_t) / 1000000, 2))
405 item.append(round(stdev(data_t) / 1000000, 2))
407 item.extend(["Not tested", "Not tested"])
408 data_t = tbl_dict[tst_name]["cmp-data"]
410 item.append(round(mean(data_t) / 1000000, 2))
411 item.append(round(stdev(data_t) / 1000000, 2))
413 item.extend(["Not tested", "Not tested"])
# item[-4]/item[-2] are ref-mean/cmp-mean (or "Not tested" markers).
414 if item[-2] == "Not tested":
416 elif item[-4] == "Not tested":
417 item.append("New in CSIT-1908")
418 elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
419 item.append("See footnote [1]")
# Delta [%] between reference and compare means.
422 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
423 if (len(item) == len(header)) and (item[-3] != "Not tested"):
427 # 1. New in CSIT-XXXX
# Split rows into "New in CSIT", "See footnote" and delta groups.
434 if isinstance(item[-1], str):
435 if "New in CSIT" in item[-1]:
437 elif "See footnote" in item[-1]:
440 tbl_delta.append(item)
# Sort: name for new/footnoted rows (footnoted also by last column),
# descending delta for the rest.
443 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
444 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
445 tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
446 tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
448 # Put the tables together:
450 tbl_lst.extend(tbl_new)
451 tbl_lst.extend(tbl_see)
452 tbl_lst.extend(tbl_delta)
454 # Generate csv tables:
455 csv_file = "{0}.csv".format(table["output-file"])
456 with open(csv_file, "w") as file_handler:
457 file_handler.write(header_str)
459 file_handler.write(",".join([str(item) for item in test]) + "\n")
461 txt_file_name = "{0}.txt".format(table["output-file"])
462 convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Append the footnote text referenced by "See footnote [1]" rows.
465 with open(txt_file_name, 'a') as txt_file:
466 txt_file.writelines([
468 "[1] CSIT-1908 changed test methodology of dot1q tests in "
469 "2-node testbeds, dot1q encapsulation is now used on both "
471 " Previously dot1q was used only on a single link with the "
472 "other link carrying untagged Ethernet frames. This changes "
474 " in slightly lower throughput in CSIT-1908 for these "
475 "tests. See release notes."
# NIC-filtered variant of table_performance_comparison: identical flow,
# but each pass skips tests whose tags do not contain the configured NIC
# (table["reference"]["nic"] / ["compare"]["nic"] / history item["nic"])
# and the NIC designator is stripped from both the test key and the
# display name (re.sub(REGEX_NIC, ...)), so rows compare across NICs.
# NOTE(review): gap-sampled excerpt — `try:` openers, `else:` branches and
# several initializations fall in missing original lines.
# NOTE(review): near-duplicate of table_performance_comparison; a shared
# helper would remove the duplication, but that refactor needs the full
# file in view.
479 def table_performance_comparison_nic(table, input_data):
480 """Generate the table(s) with algorithm: table_performance_comparison
481 specified in the specification file.
483 :param table: Table to generate.
484 :param input_data: Data to process.
485 :type table: pandas.Series
486 :type input_data: InputData
489 logging.info("  Generating the table {0} ...".
490 format(table.get("title", "")))
493 logging.info("    Creating the data set for the {0} '{1}'.".
494 format(table.get("type", ""), table.get("title", "")))
495 data = input_data.filter_data(table, continue_on_error=True)
497 # Prepare the header of the tables
499 header = ["Test case", ]
# Column label depends on the test type being summarized.
501 if table["include-tests"] == "MRR":
502 hdr_param = "Rec Rate"
506 history = table.get("history", None)
510 ["{0} {1} [Mpps]".format(item["title"], hdr_param),
511 "{0} Stdev [Mpps]".format(item["title"])])
513 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
514 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
515 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
516 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
518 header_str = ",".join(header) + "\n"
519 except (AttributeError, KeyError) as err:
520 logging.error("The model is invalid, missing parameter: {0}".
524 # Prepare data to the table:
# Pass 1: "ref-data" samples, restricted to the reference NIC tag.
526 for job, builds in table["reference"]["data"].items():
527 topo = "2n-skx" if "2n-skx" in job else ""
529 for tst_name, tst_data in data[job][str(build)].iteritems():
530 if table["reference"]["nic"] not in tst_data["tags"]:
# Normalize the test name (drop rate-search suffixes, collapse
# thread/core markers) and strip the NIC designator from the key.
532 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
533 replace("-ndrpdr", "").replace("-pdrdisc", "").\
534 replace("-ndrdisc", "").replace("-pdr", "").\
535 replace("-ndr", "").\
536 replace("1t1c", "1c").replace("2t1c", "1c").\
537 replace("2t2c", "2c").replace("4t2c", "2c").\
538 replace("4t4c", "4c").replace("8t4c", "4c")
539 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
540 if "across topologies" in table["title"].lower():
541 tst_name_mod = tst_name_mod.replace("2n1l-", "")
542 if tbl_dict.get(tst_name_mod, None) is None:
# Unlike the non-NIC variant, the display name carries no NIC prefix.
543 name = "{0}".format("-".join(tst_data["name"].
545 if "across testbeds" in table["title"].lower() or \
546 "across topologies" in table["title"].lower():
548 replace("1t1c", "1c").replace("2t1c", "1c").\
549 replace("2t2c", "2c").replace("4t2c", "2c").\
550 replace("4t4c", "4c").replace("8t4c", "4c")
551 tbl_dict[tst_name_mod] = {"name": name,
555 # TODO: Re-work when NDRPDRDISC tests are not used
556 if table["include-tests"] == "MRR":
557 tbl_dict[tst_name_mod]["ref-data"]. \
558 append(tst_data["result"]["receive-rate"].avg)
559 elif table["include-tests"] == "PDR":
560 if tst_data["type"] == "PDR":
561 tbl_dict[tst_name_mod]["ref-data"]. \
562 append(tst_data["throughput"]["value"])
563 elif tst_data["type"] == "NDRPDR":
564 tbl_dict[tst_name_mod]["ref-data"].append(
565 tst_data["throughput"]["PDR"]["LOWER"])
566 elif table["include-tests"] == "NDR":
567 if tst_data["type"] == "NDR":
568 tbl_dict[tst_name_mod]["ref-data"]. \
569 append(tst_data["throughput"]["value"])
570 elif tst_data["type"] == "NDRPDR":
571 tbl_dict[tst_name_mod]["ref-data"].append(
572 tst_data["throughput"]["NDR"]["LOWER"])
576 pass  # No data in output.xml for this test
# Pass 2: "cmp-data" samples, restricted to the compare NIC tag.
578 for job, builds in table["compare"]["data"].items():
580 for tst_name, tst_data in data[job][str(build)].iteritems():
581 if table["compare"]["nic"] not in tst_data["tags"]:
583 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
584 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
585 replace("-ndrdisc", "").replace("-pdr", ""). \
586 replace("-ndr", "").\
587 replace("1t1c", "1c").replace("2t1c", "1c").\
588 replace("2t2c", "2c").replace("4t2c", "2c").\
589 replace("4t4c", "4c").replace("8t4c", "4c")
590 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
591 if "across topologies" in table["title"].lower():
592 tst_name_mod = tst_name_mod.replace("2n1l-", "")
593 if tbl_dict.get(tst_name_mod, None) is None:
594 name = "{0}".format("-".join(tst_data["name"].
596 if "across testbeds" in table["title"].lower() or \
597 "across topologies" in table["title"].lower():
599 replace("1t1c", "1c").replace("2t1c", "1c").\
600 replace("2t2c", "2c").replace("4t2c", "2c").\
601 replace("4t4c", "4c").replace("8t4c", "4c")
602 tbl_dict[tst_name_mod] = {"name": name,
606 # TODO: Re-work when NDRPDRDISC tests are not used
607 if table["include-tests"] == "MRR":
608 tbl_dict[tst_name_mod]["cmp-data"]. \
609 append(tst_data["result"]["receive-rate"].avg)
610 elif table["include-tests"] == "PDR":
611 if tst_data["type"] == "PDR":
612 tbl_dict[tst_name_mod]["cmp-data"]. \
613 append(tst_data["throughput"]["value"])
614 elif tst_data["type"] == "NDRPDR":
615 tbl_dict[tst_name_mod]["cmp-data"].append(
616 tst_data["throughput"]["PDR"]["LOWER"])
617 elif table["include-tests"] == "NDR":
618 if tst_data["type"] == "NDR":
619 tbl_dict[tst_name_mod]["cmp-data"]. \
620 append(tst_data["throughput"]["value"])
621 elif tst_data["type"] == "NDRPDR":
622 tbl_dict[tst_name_mod]["cmp-data"].append(
623 tst_data["throughput"]["NDR"]["LOWER"])
626 except (KeyError, TypeError):
# Pass 3: optional historical result sets, filtered by item["nic"].
631 for job, builds in item["data"].items():
633 for tst_name, tst_data in data[job][str(build)].iteritems():
634 if item["nic"] not in tst_data["tags"]:
636 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
637 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
638 replace("-ndrdisc", "").replace("-pdr", ""). \
639 replace("-ndr", "").\
640 replace("1t1c", "1c").replace("2t1c", "1c").\
641 replace("2t2c", "2c").replace("4t2c", "2c").\
642 replace("4t4c", "4c").replace("8t4c", "4c")
643 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
644 if "across topologies" in table["title"].lower():
645 tst_name_mod = tst_name_mod.replace("2n1l-", "")
646 if tbl_dict.get(tst_name_mod, None) is None:
648 if tbl_dict[tst_name_mod].get("history", None) is None:
649 tbl_dict[tst_name_mod]["history"] = OrderedDict()
650 if tbl_dict[tst_name_mod]["history"].get(item["title"],
652 tbl_dict[tst_name_mod]["history"][item["title"]] = \
655 # TODO: Re-work when NDRPDRDISC tests are not used
656 if table["include-tests"] == "MRR":
657 tbl_dict[tst_name_mod]["history"][item["title"
658 ]].append(tst_data["result"]["receive-rate"].
660 elif table["include-tests"] == "PDR":
661 if tst_data["type"] == "PDR":
662 tbl_dict[tst_name_mod]["history"][
664 append(tst_data["throughput"]["value"])
665 elif tst_data["type"] == "NDRPDR":
666 tbl_dict[tst_name_mod]["history"][item[
667 "title"]].append(tst_data["throughput"][
669 elif table["include-tests"] == "NDR":
670 if tst_data["type"] == "NDR":
671 tbl_dict[tst_name_mod]["history"][
673 append(tst_data["throughput"]["value"])
674 elif tst_data["type"] == "NDRPDR":
675 tbl_dict[tst_name_mod]["history"][item[
676 "title"]].append(tst_data["throughput"][
680 except (TypeError, KeyError):
# Assemble one output row per test: history, ref, cmp means/stdevs
# (converted to Mpps), then a status/delta column.
685 for tst_name in tbl_dict.keys():
686 item = [tbl_dict[tst_name]["name"], ]
688 if tbl_dict[tst_name].get("history", None) is not None:
689 for hist_data in tbl_dict[tst_name]["history"].values():
691 item.append(round(mean(hist_data) / 1000000, 2))
692 item.append(round(stdev(hist_data) / 1000000, 2))
694 item.extend(["Not tested", "Not tested"])
696 item.extend(["Not tested", "Not tested"])
697 data_t = tbl_dict[tst_name]["ref-data"]
699 item.append(round(mean(data_t) / 1000000, 2))
700 item.append(round(stdev(data_t) / 1000000, 2))
702 item.extend(["Not tested", "Not tested"])
703 data_t = tbl_dict[tst_name]["cmp-data"]
705 item.append(round(mean(data_t) / 1000000, 2))
706 item.append(round(stdev(data_t) / 1000000, 2))
708 item.extend(["Not tested", "Not tested"])
# item[-4]/item[-2] are ref-mean/cmp-mean (or "Not tested" markers).
709 if item[-2] == "Not tested":
711 elif item[-4] == "Not tested":
712 item.append("New in CSIT-1908")
713 elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
714 item.append("See footnote [1]")
# Delta [%] between reference and compare means.
717 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
718 if (len(item) == len(header)) and (item[-3] != "Not tested"):
722 # 1. New in CSIT-XXXX
# Split rows into "New in CSIT", "See footnote" and delta groups.
729 if isinstance(item[-1], str):
730 if "New in CSIT" in item[-1]:
732 elif "See footnote" in item[-1]:
735 tbl_delta.append(item)
# Sort: name for new/footnoted rows (footnoted also by last column),
# descending delta for the rest.
738 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
739 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
740 tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
741 tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
743 # Put the tables together:
745 tbl_lst.extend(tbl_new)
746 tbl_lst.extend(tbl_see)
747 tbl_lst.extend(tbl_delta)
749 # Generate csv tables:
750 csv_file = "{0}.csv".format(table["output-file"])
751 with open(csv_file, "w") as file_handler:
752 file_handler.write(header_str)
754 file_handler.write(",".join([str(item) for item in test]) + "\n")
756 txt_file_name = "{0}.txt".format(table["output-file"])
757 convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Append the footnote text referenced by "See footnote [1]" rows.
760 with open(txt_file_name, 'a') as txt_file:
761 txt_file.writelines([
763 "[1] CSIT-1908 changed test methodology of dot1q tests in "
764 "2-node testbeds, dot1q encapsulation is now used on both "
766 " Previously dot1q was used only on a single link with the "
767 "other link carrying untagged Ethernet frames. This changes "
769 " in slightly lower throughput in CSIT-1908 for these "
770 "tests. See release notes."
# Compare the same tests run on two different NICs: one result list per
# test keyed by the NIC-stripped test name, samples routed to "ref-data"
# or "cmp-data" by NIC tag, then mean/stdev in Mpps and a relative-change
# delta column, sorted by descending delta.
# NOTE(review): gap-sampled excerpt — `try:` openers, `else:` branches and
# initializations (e.g. `tbl_dict`, `tbl_lst`, `result`) fall in missing
# original lines.
774 def table_nics_comparison(table, input_data):
775 """Generate the table(s) with algorithm: table_nics_comparison
776 specified in the specification file.
778 :param table: Table to generate.
779 :param input_data: Data to process.
780 :type table: pandas.Series
781 :type input_data: InputData
784 logging.info("  Generating the table {0} ...".
785 format(table.get("title", "")))
788 logging.info("    Creating the data set for the {0} '{1}'.".
789 format(table.get("type", ""), table.get("title", "")))
790 data = input_data.filter_data(table, continue_on_error=True)
792 # Prepare the header of the tables
794 header = ["Test case", ]
# Column label depends on the test type being summarized.
796 if table["include-tests"] == "MRR":
797 hdr_param = "Rec Rate"
802 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
803 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
804 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
805 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
807 header_str = ",".join(header) + "\n"
808 except (AttributeError, KeyError) as err:
809 logging.error("The model is invalid, missing parameter: {0}".
813 # Prepare data to the table:
815 for job, builds in table["data"].items():
817 for tst_name, tst_data in data[job][str(build)].iteritems():
# Normalize the test name (drop rate-search suffixes, collapse
# thread/core markers) and strip the NIC designator so results from
# both NICs land under one key.
818 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
819 replace("-ndrpdr", "").replace("-pdrdisc", "").\
820 replace("-ndrdisc", "").replace("-pdr", "").\
821 replace("-ndr", "").\
822 replace("1t1c", "1c").replace("2t1c", "1c").\
823 replace("2t2c", "2c").replace("4t2c", "2c").\
824 replace("4t4c", "4c").replace("8t4c", "4c")
825 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
826 if tbl_dict.get(tst_name_mod, None) is None:
827 name = "-".join(tst_data["name"].split("-")[:-1])
828 tbl_dict[tst_name_mod] = {"name": name,
# Select the sample value according to the configured test kind.
832 if table["include-tests"] == "MRR":
833 result = tst_data["result"]["receive-rate"].avg
834 elif table["include-tests"] == "PDR":
835 result = tst_data["throughput"]["PDR"]["LOWER"]
836 elif table["include-tests"] == "NDR":
837 result = tst_data["throughput"]["NDR"]["LOWER"]
# Route the sample by NIC tag: reference NIC vs compared NIC.
842 if table["reference"]["nic"] in tst_data["tags"]:
843 tbl_dict[tst_name_mod]["ref-data"].append(result)
844 elif table["compare"]["nic"] in tst_data["tags"]:
845 tbl_dict[tst_name_mod]["cmp-data"].append(result)
846 except (TypeError, KeyError) as err:
847 logging.debug("No data for {0}".format(tst_name))
848 logging.debug(repr(err))
849 # No data in output.xml for this test
# Assemble rows: ref mean/stdev, cmp mean/stdev (Mpps), delta [%].
852 for tst_name in tbl_dict.keys():
853 item = [tbl_dict[tst_name]["name"], ]
854 data_t = tbl_dict[tst_name]["ref-data"]
856 item.append(round(mean(data_t) / 1000000, 2))
857 item.append(round(stdev(data_t) / 1000000, 2))
859 item.extend([None, None])
860 data_t = tbl_dict[tst_name]["cmp-data"]
862 item.append(round(mean(data_t) / 1000000, 2))
863 item.append(round(stdev(data_t) / 1000000, 2))
865 item.extend([None, None])
# Guard against missing data and division by a zero reference mean.
866 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
867 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
868 if len(item) == len(header):
871 # Sort the table according to the relative change
872 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
874 # Generate csv tables:
875 csv_file = "{0}.csv".format(table["output-file"])
876 with open(csv_file, "w") as file_handler:
877 file_handler.write(header_str)
879 file_handler.write(",".join([str(item) for item in test]) + "\n")
881 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
# Compare SOAK test throughput (compare set) against the corresponding
# NDR/PDR/MRR results (reference set). SOAK tests are collected first to
# define the row set; reference samples are added only for tests present
# in that set. Delta and its stdev come from relative_change_stdev.
# NOTE(review): gap-sampled excerpt — `try:` openers, `else:` branches and
# initializations (e.g. `tbl_dict`, `tbl_lst`, `header`) fall in missing
# original lines.
884 def table_soak_vs_ndr(table, input_data):
885 """Generate the table(s) with algorithm: table_soak_vs_ndr
886 specified in the specification file.
888 :param table: Table to generate.
889 :param input_data: Data to process.
890 :type table: pandas.Series
891 :type input_data: InputData
894 logging.info("  Generating the table {0} ...".
895 format(table.get("title", "")))
898 logging.info("    Creating the data set for the {0} '{1}'.".
899 format(table.get("type", ""), table.get("title", "")))
900 data = input_data.filter_data(table, continue_on_error=True)
902 # Prepare the header of the table
906 "{0} Thput [Mpps]".format(table["reference"]["title"]),
907 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
908 "{0} Thput [Mpps]".format(table["compare"]["title"]),
909 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
910 "Delta [%]", "Stdev of delta [%]"]
911 header_str = ",".join(header) + "\n"
912 except (AttributeError, KeyError) as err:
913 logging.error("The model is invalid, missing parameter: {0}".
917 # Create a list of available SOAK test results:
919 for job, builds in table["compare"]["data"].items():
921 for tst_name, tst_data in data[job][str(build)].iteritems():
922 if tst_data["type"] == "SOAK":
# Key by the soak test name with the "-soak" suffix removed so it
# can match the suffix-stripped NDR name below.
923 tst_name_mod = tst_name.replace("-soak", "")
924 if tbl_dict.get(tst_name_mod, None) is None:
925 groups = re.search(REGEX_NIC, tst_data["parent"])
926 nic = groups.group(0) if groups else ""
927 name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
929 tbl_dict[tst_name_mod] = {
935 tbl_dict[tst_name_mod]["cmp-data"].append(
936 tst_data["throughput"]["LOWER"])
937 except (KeyError, TypeError):
939 tests_lst = tbl_dict.keys()
941 # Add corresponding NDR test results:
942 for job, builds in table["reference"]["data"].items():
944 for tst_name, tst_data in data[job][str(build)].iteritems():
945 tst_name_mod = tst_name.replace("-ndrpdr", "").\
# Only tests that also have a SOAK counterpart are considered.
947 if tst_name_mod in tests_lst:
949 if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
950 if table["include-tests"] == "MRR":
951 result = tst_data["result"]["receive-rate"].avg
952 elif table["include-tests"] == "PDR":
953 result = tst_data["throughput"]["PDR"]["LOWER"]
954 elif table["include-tests"] == "NDR":
955 result = tst_data["throughput"]["NDR"]["LOWER"]
958 if result is not None:
959 tbl_dict[tst_name_mod]["ref-data"].append(
961 except (KeyError, TypeError):
# Assemble rows: ref mean/stdev, cmp mean/stdev (Mpps), then delta
# and delta-stdev computed from the two means and stdevs.
965 for tst_name in tbl_dict.keys():
966 item = [tbl_dict[tst_name]["name"], ]
967 data_r = tbl_dict[tst_name]["ref-data"]
969 data_r_mean = mean(data_r)
970 item.append(round(data_r_mean / 1000000, 2))
971 data_r_stdev = stdev(data_r)
972 item.append(round(data_r_stdev / 1000000, 2))
976 item.extend([None, None])
977 data_c = tbl_dict[tst_name]["cmp-data"]
979 data_c_mean = mean(data_c)
980 item.append(round(data_c_mean / 1000000, 2))
981 data_c_stdev = stdev(data_c)
982 item.append(round(data_c_stdev / 1000000, 2))
986 item.extend([None, None])
987 if data_r_mean and data_c_mean:
988 delta, d_stdev = relative_change_stdev(
989 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
990 item.append(round(delta, 2))
991 item.append(round(d_stdev, 2))
994 # Sort the table according to the relative change
995 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
997 # Generate csv tables:
998 csv_file = "{0}.csv".format(table["output-file"])
999 with open(csv_file, "w") as file_handler:
1000 file_handler.write(header_str)
1001 for test in tbl_lst:
1002 file_handler.write(",".join([str(item) for item in test]) + "\n")
1004 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
1007 def table_performance_trending_dashboard(table, input_data):
# NOTE(review): this numbered listing has gaps (some original lines are
# missing); comments below describe only the statements that are visible.
1008 """Generate the table(s) with algorithm:
1009 table_performance_trending_dashboard
1010 specified in the specification file.
1012 :param table: Table to generate.
1013 :param input_data: Data to process.
1014 :type table: pandas.Series
1015 :type input_data: InputData
1018 logging.info(" Generating the table {0} ...".
1019 format(table.get("title", "")))
1021 # Transform the data
1022 logging.info(" Creating the data set for the {0} '{1}'.".
1023 format(table.get("type", ""), table.get("title", "")))
1024 data = input_data.filter_data(table, continue_on_error=True)
1026 # Prepare the header of the tables
1027 header = ["Test Case",
1029 "Short-Term Change [%]",
1030 "Long-Term Change [%]",
1034 header_str = ",".join(header) + "\n"
1036 # Prepare data to the table:
# Build tbl_dict keyed by test name; each entry holds a display name
# ("<nic>-<test name>") and an ordered mapping build -> receive-rate result.
1038 for job, builds in table["data"].items():
1039 for build in builds:
1040 for tst_name, tst_data in data[job][str(build)].iteritems():
# Tests on the ignore-list are skipped (the `continue` is on a line
# omitted from this listing).
1041 if tst_name.lower() in table.get("ignore-list", list()):
1043 if tbl_dict.get(tst_name, None) is None:
# NIC name is extracted from the parent suite name via REGEX_NIC.
1044 groups = re.search(REGEX_NIC, tst_data["parent"])
1047 nic = groups.group(0)
1048 tbl_dict[tst_name] = {
1049 "name": "{0}-{1}".format(nic, tst_data["name"]),
1050 "data": OrderedDict()}
1052 tbl_dict[tst_name]["data"][str(build)] = \
1053 tst_data["result"]["receive-rate"]
1054 except (TypeError, KeyError):
1055 pass # No data in output.xml for this test
# Classify each test's time series and compute short/long-term changes.
1058 for tst_name in tbl_dict.keys():
1059 data_t = tbl_dict[tst_name]["data"]
# classify_anomalies returns per-sample classifications ("regression",
# "progression", "normal", ...) and trend averages (avgs).
1063 classification_lst, avgs = classify_anomalies(data_t)
# Window sizes are capped by the amount of available data.
1065 win_size = min(len(data_t), table["window"])
1066 long_win_size = min(len(data_t), table["long-trend-window"])
1070 [x for x in avgs[-long_win_size:-win_size]
1075 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Guard against NaN trend values and division by zero before computing
# the short-term relative change in percent.
1077 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1078 rel_change_last = nan
1080 rel_change_last = round(
1081 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
# Same guard for the long-term relative change.
1083 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1084 rel_change_long = nan
1086 rel_change_long = round(
1087 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1089 if classification_lst:
# Rows with no usable numbers are presumably skipped (omitted lines).
1090 if isnan(rel_change_last) and isnan(rel_change_long):
1092 if (isnan(last_avg) or
1093 isnan(rel_change_last) or
1094 isnan(rel_change_long)):
# Throughput is reported in Mpps (raw value divided by 1e6).
1097 [tbl_dict[tst_name]["name"],
1098 round(last_avg / 1000000, 2),
1101 classification_lst[-win_size:].count("regression"),
1102 classification_lst[-win_size:].count("progression")])
1104 tbl_lst.sort(key=lambda rel: rel[0])
# Multi-pass ordering: group rows by regression count (item[4], descending),
# then by progression count (item[5], descending), each group sorted by the
# value in column 2.
1107 for nrr in range(table["window"], -1, -1):
1108 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1109 for nrp in range(table["window"], -1, -1):
1110 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1111 tbl_out.sort(key=lambda rel: rel[2])
1112 tbl_sorted.extend(tbl_out)
# Write the CSV output, then convert it to a pretty text table.
1114 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1116 logging.info(" Writing file: '{0}'".format(file_name))
1117 with open(file_name, "w") as file_handler:
1118 file_handler.write(header_str)
1119 for test in tbl_sorted:
1120 file_handler.write(",".join([str(item) for item in test]) + '\n')
1122 txt_file_name = "{0}.txt".format(table["output-file"])
1123 logging.info(" Writing file: '{0}'".format(txt_file_name))
1124 convert_csv_to_pretty_txt(file_name, txt_file_name)
1127 def _generate_url(base, testbed, test_name):
# NOTE(review): this numbered listing has gaps; several assignments (url,
# nic, framesize, anchor values) sit on omitted lines. Comments describe
# only the visible dispatch logic.
1128 """Generate URL to a trending plot from the name of the test case.
1130 :param base: The base part of URL common to all test cases.
1131 :param testbed: The testbed used for testing.
1132 :param test_name: The name of the test case.
1135 :type test_name: str
1136 :returns: The URL to the plot with the trending data for the given test
# Substring matching on test_name selects the trending page (file_name)
# and a feature suffix; order of the elif chain matters since names can
# contain several of these tokens.
1146 if "lbdpdk" in test_name or "lbvpp" in test_name:
1147 file_name = "link_bonding"
1149 elif "114b" in test_name and "vhost" in test_name:
1152 elif "testpmd" in test_name or "l3fwd" in test_name:
1155 elif "memif" in test_name:
1156 file_name = "container_memif"
1159 elif "srv6" in test_name:
1162 elif "vhost" in test_name:
1163 if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
1164 file_name = "vm_vhost_l2"
1165 if "114b" in test_name:
1167 elif "l2xcbase" in test_name and "x520" in test_name:
1168 feature = "-base-l2xc"
1169 elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
1170 feature = "-base-l2bd"
1173 elif "ip4base" in test_name:
1174 file_name = "vm_vhost_ip4"
1177 elif "ipsecbasetnlsw" in test_name:
1178 file_name = "ipsecsw"
1179 feature = "-base-scale"
1181 elif "ipsec" in test_name:
1183 feature = "-base-scale"
# HW vs SW IPsec pages; the "-int-"/"tnl" check refines the anchor suffix.
1184 if "hw-" in test_name:
1185 file_name = "ipsechw"
1186 elif "sw-" in test_name:
1187 file_name = "ipsecsw"
1188 if "-int-" in test_name:
1189 feature = "-base-scale-int"
1190 elif "tnl" in test_name:
1191 feature = "-base-scale-tnl"
1193 elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
1194 file_name = "ip4_tunnels"
1197 elif "ip4base" in test_name or "ip4scale" in test_name:
1199 if "xl710" in test_name:
1200 feature = "-base-scale-features"
1201 elif "iacl" in test_name:
1202 feature = "-features-iacl"
1203 elif "oacl" in test_name:
1204 feature = "-features-oacl"
1205 elif "snat" in test_name or "cop" in test_name:
1206 feature = "-features"
1208 feature = "-base-scale"
1210 elif "ip6base" in test_name or "ip6scale" in test_name:
1212 feature = "-base-scale"
# NOTE(review): "l2dbbasemaclrn"/"l2dbscale" look like typos for
# "l2bd..." — they duplicate the l2bd tokens already matched above;
# confirm against the test naming scheme before changing.
1214 elif "l2xcbase" in test_name or "l2xcscale" in test_name \
1215 or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
1216 or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
1218 if "macip" in test_name:
1219 feature = "-features-macip"
1220 elif "iacl" in test_name:
1221 feature = "-features-iacl"
1222 elif "oacl" in test_name:
1223 feature = "-features-oacl"
1225 feature = "-base-scale"
# NIC selection (assignments are on omitted lines).
1227 if "x520" in test_name:
1229 elif "x710" in test_name:
1231 elif "xl710" in test_name:
1233 elif "xxv710" in test_name:
1235 elif "vic1227" in test_name:
1237 elif "vic1385" in test_name:
1239 elif "x553" in test_name:
# Frame-size selection (assignments are on omitted lines).
1245 if "64b" in test_name:
1247 elif "78b" in test_name:
1249 elif "imix" in test_name:
1251 elif "9000b" in test_name:
1253 elif "1518b" in test_name:
1255 elif "114b" in test_name:
1259 anchor += framesize + '-'
# Core/thread configuration selection (assignments are on omitted lines).
1261 if "1t1c" in test_name:
1263 elif "2t2c" in test_name:
1265 elif "4t4c" in test_name:
1267 elif "2t1c" in test_name:
1269 elif "4t2c" in test_name:
1271 elif "8t4c" in test_name:
# The "-int"/"-tnl" suffix is stripped from the feature used in the page
# path but kept in the trailing fragment appended after the anchor.
1274 return url + file_name + '-' + testbed + '-' + nic + framesize + \
1275 feature.replace("-int", "").replace("-tnl", "") + anchor + feature
1278 def table_performance_trending_dashboard_html(table, input_data):
# NOTE(review): this numbered listing has gaps; comments describe only the
# visible statements.
1279 """Generate the table(s) with algorithm:
1280 table_performance_trending_dashboard_html specified in the specification
1283 :param table: Table to generate.
1284 :param input_data: Data to process.
1286 :type input_data: InputData
# The testbed name is required to build trending-plot URLs; bail out with
# an error when it is missing (the return is on an omitted line).
1289 testbed = table.get("testbed", None)
1291 logging.error("The testbed is not defined for the table '{0}'.".
1292 format(table.get("title", "")))
1295 logging.info(" Generating the table {0} ...".
1296 format(table.get("title", "")))
# Read the previously generated dashboard CSV; 'rb' + csv.reader is the
# Python 2 idiom used throughout this file.
1299 with open(table["input-file"], 'rb') as csv_file:
1300 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1301 csv_lst = [item for item in csv_content]
1303 logging.warning("The input file is not defined.")
1305 except csv.Error as err:
1306 logging.warning("Not possible to process the file '{0}'.\n{1}".
1307 format(table["input-file"], err))
# Build the HTML table with xml.etree.ElementTree.
1311 dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
1314 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
# First CSV row becomes the header; first column left-aligned, rest centered.
1315 for idx, item in enumerate(csv_lst[0]):
1316 alignment = "left" if idx == 0 else "center"
1317 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Two shades per status so adjacent rows of the same status alternate.
1321 colors = {"regression": ("#ffcccc", "#ff9999"),
1322 "progression": ("#c6ecc6", "#9fdf9f"),
1323 "normal": ("#e9f1fb", "#d4e4f7")}
1324 for r_idx, row in enumerate(csv_lst[1:]):
# The conditions choosing between these statuses are on omitted lines;
# presumably they inspect the regression/progression count columns.
1326 color = "regression"
1328 color = "progression"
1331 background = colors[color][r_idx % 2]
1332 tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
1335 for c_idx, item in enumerate(row):
1336 alignment = "left" if c_idx == 0 else "center"
1337 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# Test-name cells become links to the corresponding trending plot.
1340 url = _generate_url("../trending/", testbed, item)
1341 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Emit as an rST "raw html" directive so it can be included in the docs.
1346 with open(table["output-file"], 'w') as html_file:
1347 logging.info(" Writing file: '{0}'".format(table["output-file"]))
1348 html_file.write(".. raw:: html\n\n\t")
1349 html_file.write(ET.tostring(dashboard))
1350 html_file.write("\n\t<p><br><br></p>\n")
1352 logging.warning("The output file is not defined.")
1356 def table_last_failed_tests(table, input_data):
# NOTE(review): this numbered listing has gaps; comments describe only the
# visible statements.
1357 """Generate the table(s) with algorithm: table_last_failed_tests
1358 specified in the specification file.
1360 :param table: Table to generate.
1361 :param input_data: Data to process.
1362 :type table: pandas.Series
1363 :type input_data: InputData
1366 logging.info(" Generating the table {0} ...".
1367 format(table.get("title", "")))
1369 # Transform the data
1370 logging.info(" Creating the data set for the {0} '{1}'.".
1371 format(table.get("type", ""), table.get("title", "")))
1372 data = input_data.filter_data(table, continue_on_error=True)
# Nothing to report when filtering produced no data.
1374 if data is None or data.empty:
1375 logging.warn(" No data for the {0} '{1}'.".
1376 format(table.get("type", ""), table.get("title", "")))
# For every configured build, record the build id, the tested version and
# the name of each FAILed test ("<nic>-<test name>").
1380 for job, builds in table["data"].items():
1381 for build in builds:
1384 version = input_data.metadata(job, build).get("version", "")
1386 logging.error("Data for {job}: {build} is not present.".
1387 format(job=job, build=build))
1389 tbl_list.append(build)
1390 tbl_list.append(version)
1391 for tst_name, tst_data in data[job][build].iteritems():
# Non-failing tests are skipped (the `continue` is on an omitted line).
1392 if tst_data["status"] != "FAIL":
1394 groups = re.search(REGEX_NIC, tst_data["parent"])
1397 nic = groups.group(0)
1398 tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))
# Plain-text output, one item per line.
1400 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1401 logging.info(" Writing file: '{0}'".format(file_name))
1402 with open(file_name, "w") as file_handler:
1403 for test in tbl_list:
1404 file_handler.write(test + '\n')
1407 def table_failed_tests(table, input_data):
# NOTE(review): this numbered listing has gaps; comments describe only the
# visible statements.
1408 """Generate the table(s) with algorithm: table_failed_tests
1409 specified in the specification file.
1411 :param table: Table to generate.
1412 :param input_data: Data to process.
1413 :type table: pandas.Series
1414 :type input_data: InputData
1417 logging.info(" Generating the table {0} ...".
1418 format(table.get("title", "")))
1420 # Transform the data
1421 logging.info(" Creating the data set for the {0} '{1}'.".
1422 format(table.get("type", ""), table.get("title", "")))
1423 data = input_data.filter_data(table, continue_on_error=True)
1425 # Prepare the header of the tables
1426 header = ["Test Case",
1428 "Last Failure [Time]",
1429 "Last Failure [VPP-Build-Id]",
1430 "Last Failure [CSIT-Job-Build-Id]"]
1432 # Generate the data for the table according to the model in the table
# Only builds generated within the last `window` days (default 7) count.
1436 timeperiod = timedelta(int(table.get("window", 7)))
1439 for job, builds in table["data"].items():
1440 for build in builds:
1442 for tst_name, tst_data in data[job][build].iteritems():
# Tests on the ignore-list are skipped (omitted `continue`).
1443 if tst_name.lower() in table.get("ignore-list", list()):
1445 if tbl_dict.get(tst_name, None) is None:
1446 groups = re.search(REGEX_NIC, tst_data["parent"])
1449 nic = groups.group(0)
1450 tbl_dict[tst_name] = {
1451 "name": "{0}-{1}".format(nic, tst_data["name"]),
1452 "data": OrderedDict()}
# Parse the build's generation timestamp and keep only results that
# fall inside the configured time window.
1454 generated = input_data.metadata(job, build).\
1455 get("generated", "")
1458 then = dt.strptime(generated, "%Y%m%d %H:%M")
1459 if (now - then) <= timeperiod:
1460 tbl_dict[tst_name]["data"][build] = (
1463 input_data.metadata(job, build).get("version", ""),
1465 except (TypeError, KeyError) as err:
1466 logging.warning("tst_name: {} - err: {}".
1467 format(tst_name, repr(err)))
# Count failures per test and remember details of the most recent one.
1471 for tst_data in tbl_dict.values():
1473 for val in tst_data["data"].values():
1474 if val[0] == "FAIL":
1476 fails_last_date = val[1]
1477 fails_last_vpp = val[2]
1478 fails_last_csit = val[3]
1480 max_fails = fails_nr if fails_nr > max_fails else max_fails
1481 tbl_lst.append([tst_data["name"],
1485 "mrr-daily-build-{0}".format(fails_last_csit)])
1487 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
# Stable regrouping: rows ordered by failure count (column 1) descending.
1489 for nrf in range(max_fails, -1, -1):
1490 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1491 tbl_sorted.extend(tbl_fails)
# Write CSV, then convert to a pretty text table.
1492 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1494 logging.info(" Writing file: '{0}'".format(file_name))
1495 with open(file_name, "w") as file_handler:
1496 file_handler.write(",".join(header) + "\n")
1497 for test in tbl_sorted:
1498 file_handler.write(",".join([str(item) for item in test]) + '\n')
1500 txt_file_name = "{0}.txt".format(table["output-file"])
1501 logging.info(" Writing file: '{0}'".format(txt_file_name))
1502 convert_csv_to_pretty_txt(file_name, txt_file_name)
1505 def table_failed_tests_html(table, input_data):
# NOTE(review): this numbered listing has gaps; comments describe only the
# visible statements. Structure mirrors
# table_performance_trending_dashboard_html but with a single color pair.
1506 """Generate the table(s) with algorithm: table_failed_tests_html
1507 specified in the specification file.
1509 :param table: Table to generate.
1510 :param input_data: Data to process.
1511 :type table: pandas.Series
1512 :type input_data: InputData
# The testbed name is required for trending-plot URLs; log an error when
# missing (the early return is on an omitted line).
1515 testbed = table.get("testbed", None)
1517 logging.error("The testbed is not defined for the table '{0}'.".
1518 format(table.get("title", "")))
1521 logging.info(" Generating the table {0} ...".
1522 format(table.get("title", "")))
# Read the failed-tests CSV produced by table_failed_tests.
1525 with open(table["input-file"], 'rb') as csv_file:
1526 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
1527 csv_lst = [item for item in csv_content]
1529 logging.warning("The input file is not defined.")
1531 except csv.Error as err:
1532 logging.warning("Not possible to process the file '{0}'.\n{1}".
1533 format(table["input-file"], err))
# Build the HTML table with xml.etree.ElementTree.
1537 failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
1540 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
# First CSV row becomes the header row.
1541 for idx, item in enumerate(csv_lst[0]):
1542 alignment = "left" if idx == 0 else "center"
1543 th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Alternating row background colors.
1547 colors = ("#e9f1fb", "#d4e4f7")
1548 for r_idx, row in enumerate(csv_lst[1:]):
1549 background = colors[r_idx % 2]
1550 tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
1553 for c_idx, item in enumerate(row):
1554 alignment = "left" if c_idx == 0 else "center"
1555 td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# Test-name cells link to the corresponding trending plot.
1558 url = _generate_url("../trending/", testbed, item)
1559 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Emit as an rST "raw html" directive.
1564 with open(table["output-file"], 'w') as html_file:
1565 logging.info(" Writing file: '{0}'".format(table["output-file"]))
1566 html_file.write(".. raw:: html\n\n\t")
1567 html_file.write(ET.tostring(failed_tests))
1568 html_file.write("\n\t<p><br><br></p>\n")
1570 logging.warning("The output file is not defined.")