1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30 convert_csv_to_pretty_txt, relative_change_stdev
# Matches NIC designators such as "10ge2p1x710" embedded in suite/parent
# names. Used below with re.search() to extract the NIC model for display
# names and with re.sub() to strip it when comparing across NICs.
REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Dispatches each table to the generator function named by its
    "algorithm" key.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        # NOTE(review): excerpted listing -- the enclosing "try:" line is
        # elided here; only the handler below is visible.
        # NOTE(review): eval() on a spec-supplied string executes arbitrary
        # code if the specification file is untrusted -- consider a dispatch
        # dict of allowed algorithm names instead.
            eval(table["algorithm"])(table, data)
        except NameError as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    Writes one CSV file per suite, with one row per test and one column per
    entry in table["columns"].

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): excerpted listing -- some original lines are elided;
    # gaps are marked below, not filled in.

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the whole input data set into the subset for this table.
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    # NOTE(review): initialization of "header" is elided in this excerpt.
    # Each title is CSV-quoted; embedded double quotes are doubled.
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # NOTE(review): Python 2 idiom -- dict.keys()[0] picks the only/first
    # job; invalid in Python 3.
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    suites = input_data.suites(job, build)
    # NOTE(review): the emptiness check guarding this error is elided.
    logging.error(" No data available. The table will not be generated.")
    for suite_longname, suite in suites.iteritems():
        suite_name = suite["name"]
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                for column in table["columns"]:
                    # column["data"] looks like "<prefix> <field>"; take the
                    # field name and CSV-escape the value.
                    col_data = str(data[job][build][test][column["data"].
                                   split(" ")[1]]).replace('"', '""')
                    # NOTE(review): condition tuple and replace() arguments
                    # are truncated in this excerpt.
                    if column["data"].split(" ")[1] in ("conf-history",
                        col_data = replace(col_data, " |br| ", "",
                        # Wrap in rST pre-formatting markers; drops the
                        # trailing 5 characters (presumably a left-over
                        # " |br| " fragment -- TODO confirm).
                        col_data = " |prein| {0} |preout| ".\
                                   format(col_data[:-5])
                    row_lst.append('"{0}"'.format(col_data))
                    # NOTE(review): the except branch producing this
                    # placeholder is elided.
                    row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info(" Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in table_lst:
                file_handler.write(",".join(item) + "\n")

    logging.info(" Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Like table_details, but first merges data from multiple jobs/builds
    into a single data set before writing one CSV file per suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): excerpted listing -- some original lines are elided;
    # gaps are marked below, not filled in.

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform and merge the test data across all sources.
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    # Same for the suite metadata.
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    # NOTE(review): initialization of "header" is elided in this excerpt.
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.iteritems():
        suite_name = suite["name"]
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                for column in table["columns"]:
                    col_data = str(data[test][column["data"].
                                   split(" ")[1]]).replace('"', '""')
                    # NOTE(review): replace() arguments and the condition
                    # tuple are truncated in this excerpt.
                    col_data = replace(col_data, "No Data",
                    if column["data"].split(" ")[1] in ("conf-history",
                        col_data = replace(col_data, " |br| ", "",
                        col_data = " |prein| {0} |preout| ".\
                                   format(col_data[:-5])
                    row_lst.append('"{0}"'.format(col_data))
                    # NOTE(review): the except branch producing this
                    # placeholder is elided.
                    row_lst.append('"Not captured"')
                table_lst.append(row_lst)

        # Write the data to file
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info(" Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in table_lst:
                file_handler.write(",".join(item) + "\n")

    logging.info(" Done.")
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Builds a CSV/TXT table comparing "reference" vs "compare" runs (plus
    optional "history" columns), with mean, stdev and relative delta per
    test, partitioned into delta / new-in-release / footnoted sub-tables.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): excerpted listing -- some original lines are elided;
    # gaps are marked below, not filled in.

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    # NOTE(review): the enclosing "try:" and the else-branch setting
    # hdr_param for non-MRR tables are elided.
    header = ["Test case", ]

    if table["include-tests"] == "MRR":
        hdr_param = "Rec Rate"

    history = table.get("history", None)
    # NOTE(review): the header.extend(...) call openers are elided; only
    # their argument lists are visible below.
            ["{0} {1} [Mpps]".format(item["title"], hdr_param),
             "{0} Stdev [Mpps]".format(item["title"])])
        ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
         "{0} Stdev [Mpps]".format(table["reference"]["title"]),
         "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
         "{0} Stdev [Mpps]".format(table["compare"]["title"]),
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".

    # Prepare data to the table:
    # ---- Reference data ----
    for job, builds in table["reference"]["data"].items():
        topo = "2n-skx" if "2n-skx" in job else ""
        # NOTE(review): the "for build in builds:" / "try:" lines are elided.
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Normalize the test name: drop rate-search suffixes and
                # collapse thread/core variants to the core count only.
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Prefix the display name with the NIC model taken from
                    # the parent suite name.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    # NOTE(review): split/join tail of this expression and
                    # parts of the name normalization are elided.
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        replace("1t1c", "1c").replace("2t1c", "1c").\
                        replace("2t2c", "2c").replace("4t2c", "2c").\
                        replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                # TODO: Re-work when NDRPDRDISC tests are not used
                if table["include-tests"] == "MRR":
                    tbl_dict[tst_name_mod]["ref-data"]. \
                        append(tst_data["result"]["receive-rate"].avg)
                elif table["include-tests"] == "PDR":
                    if tst_data["type"] == "PDR":
                        tbl_dict[tst_name_mod]["ref-data"]. \
                            append(tst_data["throughput"]["value"])
                    elif tst_data["type"] == "NDRPDR":
                        tbl_dict[tst_name_mod]["ref-data"].append(
                            tst_data["throughput"]["PDR"]["LOWER"])
                elif table["include-tests"] == "NDR":
                    if tst_data["type"] == "NDR":
                        tbl_dict[tst_name_mod]["ref-data"]. \
                            append(tst_data["throughput"]["value"])
                    elif tst_data["type"] == "NDRPDR":
                        tbl_dict[tst_name_mod]["ref-data"].append(
                            tst_data["throughput"]["NDR"]["LOWER"])
                # NOTE(review): the except clause owning this is elided.
                pass  # No data in output.xml for this test

    # ---- Compare data (same normalization, fills "cmp-data") ----
    for job, builds in table["compare"]["data"].items():
        # NOTE(review): the "for build in builds:" / "try:" lines are elided.
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                    replace("-ndrdisc", "").replace("-pdr", ""). \
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        replace("1t1c", "1c").replace("2t1c", "1c").\
                        replace("2t2c", "2c").replace("4t2c", "2c").\
                        replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                # TODO: Re-work when NDRPDRDISC tests are not used
                if table["include-tests"] == "MRR":
                    tbl_dict[tst_name_mod]["cmp-data"]. \
                        append(tst_data["result"]["receive-rate"].avg)
                elif table["include-tests"] == "PDR":
                    if tst_data["type"] == "PDR":
                        tbl_dict[tst_name_mod]["cmp-data"]. \
                            append(tst_data["throughput"]["value"])
                    elif tst_data["type"] == "NDRPDR":
                        tbl_dict[tst_name_mod]["cmp-data"].append(
                            tst_data["throughput"]["PDR"]["LOWER"])
                elif table["include-tests"] == "NDR":
                    if tst_data["type"] == "NDR":
                        tbl_dict[tst_name_mod]["cmp-data"]. \
                            append(tst_data["throughput"]["value"])
                    elif tst_data["type"] == "NDRPDR":
                        tbl_dict[tst_name_mod]["cmp-data"].append(
                            tst_data["throughput"]["NDR"]["LOWER"])
            except (KeyError, TypeError):

    # ---- Optional history columns ----
    # NOTE(review): the enclosing "if history: for item in history:" lines
    # are elided in this excerpt.
        for job, builds in item["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                    replace("-ndrdisc", "").replace("-pdr", ""). \
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                if tbl_dict[tst_name_mod].get("history", None) is None:
                    tbl_dict[tst_name_mod]["history"] = OrderedDict()
                if tbl_dict[tst_name_mod]["history"].get(item["title"],
                    tbl_dict[tst_name_mod]["history"][item["title"]] = \
                # TODO: Re-work when NDRPDRDISC tests are not used
                if table["include-tests"] == "MRR":
                    tbl_dict[tst_name_mod]["history"][item["title"
                        ]].append(tst_data["result"]["receive-rate"].
                elif table["include-tests"] == "PDR":
                    if tst_data["type"] == "PDR":
                        tbl_dict[tst_name_mod]["history"][
                            append(tst_data["throughput"]["value"])
                    elif tst_data["type"] == "NDRPDR":
                        tbl_dict[tst_name_mod]["history"][item[
                            "title"]].append(tst_data["throughput"][
                elif table["include-tests"] == "NDR":
                    if tst_data["type"] == "NDR":
                        tbl_dict[tst_name_mod]["history"][
                            append(tst_data["throughput"]["value"])
                    elif tst_data["type"] == "NDRPDR":
                        tbl_dict[tst_name_mod]["history"][item[
                            "title"]].append(tst_data["throughput"][
            except (TypeError, KeyError):

    # ---- Assemble rows: mean/stdev in Mpps per column pair ----
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if tbl_dict[tst_name].get("history", None) is not None:
            for hist_data in tbl_dict[tst_name]["history"].values():
                item.append(round(mean(hist_data) / 1000000, 2))
                item.append(round(stdev(hist_data) / 1000000, 2))
                # NOTE(review): the else-branches guarding these
                # placeholders are elided.
                item.extend(["Not tested", "Not tested"])
            item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["ref-data"]
        item.append(round(mean(data_t) / 1000000, 2))
        item.append(round(stdev(data_t) / 1000000, 2))
        item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["cmp-data"]
        item.append(round(mean(data_t) / 1000000, 2))
        item.append(round(stdev(data_t) / 1000000, 2))
        item.extend(["Not tested", "Not tested"])
        # Classify the row: dropped / new in release / footnoted / delta.
        if item[-2] == "Not tested":
        elif item[-4] == "Not tested":
            item.append("New in CSIT-1908")
        elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
            item.append("See footnote [1]")
        # Relative change of compare mean vs reference mean, in percent.
        item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if (len(item) == len(header)) and (item[-3] != "Not tested"):

    # Partition rows into sub-tables:
    # 1. New in CSIT-XXXX
    # NOTE(review): the loop and list initializations for tbl_new / tbl_see
    # / tbl_delta are elided here.
        if "New in CSIT" in item[-1]:
        elif "See footnote" in item[-1]:
            tbl_delta.append(item)

    # Sort: new/footnoted by name, footnoted then by delta, delta descending.
    tbl_new.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)

    # Put the tables together:
    tbl_lst.extend(tbl_new)
    tbl_lst.extend(tbl_see)
    tbl_lst.extend(tbl_delta)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        # NOTE(review): the "for test in tbl_lst:" line is elided.
        file_handler.write(",".join([str(item) for item in test]) + "\n")

    txt_file_name = "{0}.txt".format(table["output-file"])
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # Append footnote [1] explaining the dot1q methodology change.
    with open(txt_file_name, 'a') as txt_file:
        txt_file.writelines([
            "[1] CSIT-1908 changed test methodology of dot1q tests in "
            "2-node testbeds, dot1q encapsulation is now used on both "
            " Previously dot1q was used only on a single link with the "
            "other link carrying untagged Ethernet frames. This changes "
            " in slightly lower throughput in CSIT-1908 for these "
            "tests. See release notes."
def table_performance_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Variant of table_performance_comparison that additionally filters each
    data source by a NIC tag (table[...]["nic"]) and strips the NIC part
    from the normalized test names, so different NICs can be compared.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): excerpted listing -- some original lines are elided;
    # gaps are marked below, not filled in.

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    # NOTE(review): the enclosing "try:" and the else-branch setting
    # hdr_param are elided.
    header = ["Test case", ]

    if table["include-tests"] == "MRR":
        hdr_param = "Rec Rate"

    history = table.get("history", None)
    # NOTE(review): the header.extend(...) call openers are elided; only
    # their argument lists are visible below.
            ["{0} {1} [Mpps]".format(item["title"], hdr_param),
             "{0} Stdev [Mpps]".format(item["title"])])
        ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
         "{0} Stdev [Mpps]".format(table["reference"]["title"]),
         "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
         "{0} Stdev [Mpps]".format(table["compare"]["title"]),
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".

    # Prepare data to the table:
    # ---- Reference data (only tests tagged with the reference NIC) ----
    for job, builds in table["reference"]["data"].items():
        topo = "2n-skx" if "2n-skx" in job else ""
        # NOTE(review): the "for build in builds:" / "try:" lines are elided.
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # NOTE(review): the "continue" under this guard is elided.
                if table["reference"]["nic"] not in tst_data["tags"]:
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                # Strip the NIC designator so names from different NICs
                # collide into the same table row.
                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # NOTE(review): split/join tail of this expression and
                    # parts of the name normalization are elided.
                    name = "{0}".format("-".join(tst_data["name"].
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        replace("1t1c", "1c").replace("2t1c", "1c").\
                        replace("2t2c", "2c").replace("4t2c", "2c").\
                        replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                # TODO: Re-work when NDRPDRDISC tests are not used
                if table["include-tests"] == "MRR":
                    tbl_dict[tst_name_mod]["ref-data"]. \
                        append(tst_data["result"]["receive-rate"].avg)
                elif table["include-tests"] == "PDR":
                    if tst_data["type"] == "PDR":
                        tbl_dict[tst_name_mod]["ref-data"]. \
                            append(tst_data["throughput"]["value"])
                    elif tst_data["type"] == "NDRPDR":
                        tbl_dict[tst_name_mod]["ref-data"].append(
                            tst_data["throughput"]["PDR"]["LOWER"])
                elif table["include-tests"] == "NDR":
                    if tst_data["type"] == "NDR":
                        tbl_dict[tst_name_mod]["ref-data"]. \
                            append(tst_data["throughput"]["value"])
                    elif tst_data["type"] == "NDRPDR":
                        tbl_dict[tst_name_mod]["ref-data"].append(
                            tst_data["throughput"]["NDR"]["LOWER"])
                # NOTE(review): the except clause owning this is elided.
                pass  # No data in output.xml for this test

    # ---- Compare data (only tests tagged with the compare NIC) ----
    for job, builds in table["compare"]["data"].items():
        # NOTE(review): the "for build in builds:" / "try:" lines are elided.
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # NOTE(review): the "continue" under this guard is elided.
                if table["compare"]["nic"] not in tst_data["tags"]:
                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                    replace("-ndrdisc", "").replace("-pdr", ""). \
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = "{0}".format("-".join(tst_data["name"].
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        replace("1t1c", "1c").replace("2t1c", "1c").\
                        replace("2t2c", "2c").replace("4t2c", "2c").\
                        replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                # TODO: Re-work when NDRPDRDISC tests are not used
                if table["include-tests"] == "MRR":
                    tbl_dict[tst_name_mod]["cmp-data"]. \
                        append(tst_data["result"]["receive-rate"].avg)
                elif table["include-tests"] == "PDR":
                    if tst_data["type"] == "PDR":
                        tbl_dict[tst_name_mod]["cmp-data"]. \
                            append(tst_data["throughput"]["value"])
                    elif tst_data["type"] == "NDRPDR":
                        tbl_dict[tst_name_mod]["cmp-data"].append(
                            tst_data["throughput"]["PDR"]["LOWER"])
                elif table["include-tests"] == "NDR":
                    if tst_data["type"] == "NDR":
                        tbl_dict[tst_name_mod]["cmp-data"]. \
                            append(tst_data["throughput"]["value"])
                    elif tst_data["type"] == "NDRPDR":
                        tbl_dict[tst_name_mod]["cmp-data"].append(
                            tst_data["throughput"]["NDR"]["LOWER"])
            except (KeyError, TypeError):

    # ---- Optional history columns (only tests tagged with item["nic"]) ----
    # NOTE(review): the enclosing "if history: for item in history:" lines
    # are elided in this excerpt.
        for job, builds in item["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # NOTE(review): the "continue" under this guard is elided.
                if item["nic"] not in tst_data["tags"]:
                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                    replace("-ndrdisc", "").replace("-pdr", ""). \
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                if tbl_dict[tst_name_mod].get("history", None) is None:
                    tbl_dict[tst_name_mod]["history"] = OrderedDict()
                if tbl_dict[tst_name_mod]["history"].get(item["title"],
                    tbl_dict[tst_name_mod]["history"][item["title"]] = \
                # TODO: Re-work when NDRPDRDISC tests are not used
                if table["include-tests"] == "MRR":
                    tbl_dict[tst_name_mod]["history"][item["title"
                        ]].append(tst_data["result"]["receive-rate"].
                elif table["include-tests"] == "PDR":
                    if tst_data["type"] == "PDR":
                        tbl_dict[tst_name_mod]["history"][
                            append(tst_data["throughput"]["value"])
                    elif tst_data["type"] == "NDRPDR":
                        tbl_dict[tst_name_mod]["history"][item[
                            "title"]].append(tst_data["throughput"][
                elif table["include-tests"] == "NDR":
                    if tst_data["type"] == "NDR":
                        tbl_dict[tst_name_mod]["history"][
                            append(tst_data["throughput"]["value"])
                    elif tst_data["type"] == "NDRPDR":
                        tbl_dict[tst_name_mod]["history"][item[
                            "title"]].append(tst_data["throughput"][
            except (TypeError, KeyError):

    # ---- Assemble rows: mean/stdev in Mpps per column pair ----
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if tbl_dict[tst_name].get("history", None) is not None:
            for hist_data in tbl_dict[tst_name]["history"].values():
                item.append(round(mean(hist_data) / 1000000, 2))
                item.append(round(stdev(hist_data) / 1000000, 2))
                # NOTE(review): the else-branches guarding these
                # placeholders are elided.
                item.extend(["Not tested", "Not tested"])
            item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["ref-data"]
        item.append(round(mean(data_t) / 1000000, 2))
        item.append(round(stdev(data_t) / 1000000, 2))
        item.extend(["Not tested", "Not tested"])
        data_t = tbl_dict[tst_name]["cmp-data"]
        item.append(round(mean(data_t) / 1000000, 2))
        item.append(round(stdev(data_t) / 1000000, 2))
        item.extend(["Not tested", "Not tested"])
        # Classify the row: dropped / new in release / footnoted / delta.
        if item[-2] == "Not tested":
        elif item[-4] == "Not tested":
            item.append("New in CSIT-1908")
        elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]:
            item.append("See footnote [1]")
        item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if (len(item) == len(header)) and (item[-3] != "Not tested"):

    # Partition rows into sub-tables:
    # 1. New in CSIT-XXXX
    # NOTE(review): the loop and list initializations for tbl_new / tbl_see
    # / tbl_delta are elided here.
        if "New in CSIT" in item[-1]:
        elif "See footnote" in item[-1]:
            tbl_delta.append(item)

    # Sort: new/footnoted by name, footnoted then by delta, delta descending.
    tbl_new.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)

    # Put the tables together:
    tbl_lst.extend(tbl_new)
    tbl_lst.extend(tbl_see)
    tbl_lst.extend(tbl_delta)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        # NOTE(review): the "for test in tbl_lst:" line is elided.
        file_handler.write(",".join([str(item) for item in test]) + "\n")

    txt_file_name = "{0}.txt".format(table["output-file"])
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # Append footnote [1] explaining the dot1q methodology change.
    with open(txt_file_name, 'a') as txt_file:
        txt_file.writelines([
            "[1] CSIT-1908 changed test methodology of dot1q tests in "
            "2-node testbeds, dot1q encapsulation is now used on both "
            " Previously dot1q was used only on a single link with the "
            "other link carrying untagged Ethernet frames. This changes "
            " in slightly lower throughput in CSIT-1908 for these "
            "tests. See release notes."
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Compares results of the same tests run on two NICs (reference vs
    compare tag) and writes a CSV/TXT table with means, stdevs and the
    relative change per test.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): excerpted listing -- some original lines are elided;
    # gaps are marked below, not filled in.

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    # NOTE(review): "try:", the else-branch for hdr_param and the
    # header.extend(...) opener are elided.
    header = ["Test case", ]

    if table["include-tests"] == "MRR":
        hdr_param = "Rec Rate"
        ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
         "{0} Stdev [Mpps]".format(table["reference"]["title"]),
         "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
         "{0} Stdev [Mpps]".format(table["compare"]["title"]),
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".

    # Prepare data to the table:
    for job, builds in table["data"].items():
        # NOTE(review): the "for build in builds:" / "try:" lines are elided.
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Normalize the test name and strip the NIC designator so
                # the same test on both NICs maps to one row.
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = "-".join(tst_data["name"].split("-")[:-1])
                    tbl_dict[tst_name_mod] = {"name": name,
                # Pick the measured value according to the configured
                # test type.
                if table["include-tests"] == "MRR":
                    result = tst_data["result"]["receive-rate"].avg
                elif table["include-tests"] == "PDR":
                    result = tst_data["throughput"]["PDR"]["LOWER"]
                elif table["include-tests"] == "NDR":
                    result = tst_data["throughput"]["NDR"]["LOWER"]
                # Route the value to the reference or compare column by
                # NIC tag.
                if table["reference"]["nic"] in tst_data["tags"]:
                    tbl_dict[tst_name_mod]["ref-data"].append(result)
                elif table["compare"]["nic"] in tst_data["tags"]:
                    tbl_dict[tst_name_mod]["cmp-data"].append(result)
            except (TypeError, KeyError) as err:
                logging.debug("No data for {0}".format(tst_name))
                logging.debug(repr(err))
                # No data in output.xml for this test

    # Assemble rows: mean/stdev in Mpps, None when a side has no samples.
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        data_t = tbl_dict[tst_name]["ref-data"]
        item.append(round(mean(data_t) / 1000000, 2))
        item.append(round(stdev(data_t) / 1000000, 2))
        # NOTE(review): the else-branches guarding these placeholders
        # are elided.
        item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        item.append(round(mean(data_t) / 1000000, 2))
        item.append(round(stdev(data_t) / 1000000, 2))
        item.extend([None, None])
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
            if len(item) == len(header):

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        # NOTE(review): the "for test in tbl_lst:" line is elided.
        file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Pairs SOAK test results (compare side) with the corresponding NDR/PDR
    results (reference side) and writes a CSV/TXT table with means,
    stdevs, and delta with its stdev computed by relative_change_stdev.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): excerpted listing -- some original lines are elided;
    # gaps are marked below, not filled in.

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    # NOTE(review): "try:" and the header list opener are elided; only the
    # tail of the header literal is visible.
        "{0} Thput [Mpps]".format(table["reference"]["title"]),
        "{0} Stdev [Mpps]".format(table["reference"]["title"]),
        "{0} Thput [Mpps]".format(table["compare"]["title"]),
        "{0} Stdev [Mpps]".format(table["compare"]["title"]),
        "Delta [%]", "Stdev of delta [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".

    # Create a list of available SOAK test results:
    for job, builds in table["compare"]["data"].items():
        # NOTE(review): the "for build in builds:" / "try:" lines are elided.
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_data["type"] == "SOAK":
                    tst_name_mod = tst_name.replace("-soak", "")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data["parent"])
                        nic = groups.group(0) if groups else ""
                        # NOTE(review): tail of this expression and the
                        # dict literal body are elided.
                        name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                        tbl_dict[tst_name_mod] = {
                    tbl_dict[tst_name_mod]["cmp-data"].append(
                        tst_data["throughput"]["LOWER"])
            except (KeyError, TypeError):
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table["reference"]["data"].items():
        # NOTE(review): the "for build in builds:" / "try:" lines are elided.
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # NOTE(review): continuation of this replace chain is elided.
                tst_name_mod = tst_name.replace("-ndrpdr", "").\
                if tst_name_mod in tests_lst:
                    if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
                        if table["include-tests"] == "MRR":
                            result = tst_data["result"]["receive-rate"].avg
                        elif table["include-tests"] == "PDR":
                            result = tst_data["throughput"]["PDR"]["LOWER"]
                        elif table["include-tests"] == "NDR":
                            result = tst_data["throughput"]["NDR"]["LOWER"]
                        if result is not None:
                            tbl_dict[tst_name_mod]["ref-data"].append(
            except (KeyError, TypeError):

    # Assemble rows: mean/stdev in Mpps for both sides, then delta.
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        data_r = tbl_dict[tst_name]["ref-data"]
        data_r_mean = mean(data_r)
        item.append(round(data_r_mean / 1000000, 2))
        data_r_stdev = stdev(data_r)
        item.append(round(data_r_stdev / 1000000, 2))
        # NOTE(review): the else-branches guarding these placeholders
        # are elided.
        item.extend([None, None])
        data_c = tbl_dict[tst_name]["cmp-data"]
        data_c_mean = mean(data_c)
        item.append(round(data_c_mean / 1000000, 2))
        data_c_stdev = stdev(data_c)
        item.append(round(data_c_stdev / 1000000, 2))
        item.extend([None, None])
        if data_r_mean and data_c_mean:
            # Delta and its stdev computed from both means and stdevs.
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        # NOTE(review): the "for test in tbl_lst:" line is elided.
        file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    Builds a CSV dashboard with one row per test case (name, trend value,
    short-term and long-term change, counts of detected regressions and
    progressions) plus a pretty-printed txt rendering of the same table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    for job, builds in table["data"].items():
        for build in builds:
            # Builds are stored under their string form so they can be used
            # as OrderedDict keys in insertion (chronological) order.
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                if tbl_dict.get(tst_name, None) is None:
                    # First sighting of this test: derive the NIC from the
                    # parent suite name and register the test.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]

        # Per-sample classification ("regression"/"progression"/...) and
        # trend averages for this test's time series.
        classification_lst, avgs = classify_anomalies(data_t)

        # Short-term and long-term evaluation windows, clamped to the
        # amount of data actually available.
        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

                [x for x in avgs[-long_win_size:-win_size]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Guard against NaN inputs and division by zero before computing
        # the percentage changes.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
            if (isnan(last_avg) or
                    isnan(rel_change_last) or
                    isnan(rel_change_long)):
                # Row: name, trend (divided by 1e6 — presumably pps -> Mpps,
                # TODO confirm units), changes, regression/progression counts
                # within the short-term window.
                [tbl_dict[tst_name]["name"],
                 round(last_avg / 1000000, 2),
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    # Pre-sort alphabetically by test name for a stable secondary order.
    tbl_lst.sort(key=lambda rel: rel[0])

    # Final ordering: most regressions first (item[4]), then most
    # progressions (item[5]), ties broken by short-term change (item[2]).
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info(" Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    # Also emit a human-readable txt version of the CSV table.
    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info(" Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
def _generate_url(base, testbed, test_name):
    """Generate URL to a trending plot from the name of the test case.

    The URL is assembled from a plot file name, NIC model, frame size,
    feature suffix and anchor, each derived from substrings of the test
    case name. Branch order matters: more specific patterns must be
    matched before the generic ones.

    :param base: The base part of URL common to all test cases.
    :param testbed: The testbed used for testing.
    :param test_name: The name of the test case.
    :type base: str
    :type testbed: str
    :type test_name: str
    :returns: The URL to the plot with the trending data for the given test
        case.
    :rtype: str
    """

    # Select the plot file (and where applicable the feature suffix)
    # from the test name.
    if "lbdpdk" in test_name or "lbvpp" in test_name:
        file_name = "link_bonding"

    elif "114b" in test_name and "vhost" in test_name:

    elif "testpmd" in test_name or "l3fwd" in test_name:

    elif "memif" in test_name:
        file_name = "container_memif"

    elif "srv6" in test_name:

    elif "vhost" in test_name:
        if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
            file_name = "vm_vhost_l2"
            if "114b" in test_name:
            elif "l2xcbase" in test_name and "x520" in test_name:
                feature = "-base-l2xc"
            elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
                feature = "-base-l2bd"
        elif "ip4base" in test_name:
            file_name = "vm_vhost_ip4"

    # "ipsecbasetnlsw" is checked before the generic "ipsec" branch.
    elif "ipsecbasetnlsw" in test_name:
        file_name = "ipsecsw"
        feature = "-base-scale"

    elif "ipsec" in test_name:
        feature = "-base-scale"
        if "hw-" in test_name:
            file_name = "ipsechw"
        elif "sw-" in test_name:
            file_name = "ipsecsw"

    elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
        file_name = "ip4_tunnels"

    elif "ip4base" in test_name or "ip4scale" in test_name:
        if "xl710" in test_name:
            feature = "-base-scale-features"
        elif "iacl" in test_name:
            feature = "-features-iacl"
        elif "oacl" in test_name:
            feature = "-features-oacl"
        elif "snat" in test_name or "cop" in test_name:
            feature = "-features"
            feature = "-base-scale"

    elif "ip6base" in test_name or "ip6scale" in test_name:
        feature = "-base-scale"

    elif "l2xcbase" in test_name or "l2xcscale" in test_name \
            or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
            or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
        if "macip" in test_name:
            feature = "-features-macip"
        elif "iacl" in test_name:
            feature = "-features-iacl"
        elif "oacl" in test_name:
            feature = "-features-oacl"
            feature = "-base-scale"

    # NIC model contained in the test name.
    if "x520" in test_name:
    elif "x710" in test_name:
    elif "xl710" in test_name:
    elif "xxv710" in test_name:
    elif "vic1227" in test_name:
    elif "vic1385" in test_name:

    # Frame size; it also becomes part of the page anchor.
    if "64b" in test_name:
    elif "78b" in test_name:
    elif "imix" in test_name:
    elif "9000b" in test_name:
    elif "1518b" in test_name:
    elif "114b" in test_name:
    anchor += framesize + '-'

    # Thread/core combination.
    if "1t1c" in test_name:
    elif "2t2c" in test_name:
    elif "4t4c" in test_name:
    elif "2t1c" in test_name:
    elif "4t2c" in test_name:
    elif "8t4c" in test_name:

    # "-int"/"-tnl" are stripped from the feature used in the path part
    # but the unmodified feature is still appended after the anchor.
    return url + file_name + '-' + testbed + '-' + nic + framesize + \
        feature.replace("-int", "").replace("-tnl", "") + anchor + feature
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the CSV dashboard named by table["input-file"] and renders it as
    an HTML table wrapped in a reST ".. raw:: html" directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
        # The testbed name is needed to build links to trending plots.
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the whole input CSV into a list of rows.
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
        logging.warning("The input file is not defined.")
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header: built from the first CSV row; only the first column is
    # left-aligned.
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))

    # Rows: each row class has a (light, dark) colour pair; odd/even rows
    # alternate between the two shades.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
            color = "regression"
            color = "progression"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
                # The test-name cell links to its trending plot.
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))

        # Emit the table as raw HTML inside a reST directive.
        with open(table["output-file"], 'w') as html_file:
            logging.info(" Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
        logging.warning("The output file is not defined.")
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    Writes, per build, the build number, the version and the names of the
    tests which failed in that build, one item per line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        # Nothing to report for this table.
        logging.warn(" No data for the {0} '{1}'.".
                     format(table.get("type", ""), table.get("title", "")))

    for job, builds in table["data"].items():
        for build in builds:
                version = input_data.metadata(job, build).get("version", "")
                logging.error("Data for {job}: {build} is not present.".
                              format(job=job, build=build))

            tbl_list.append(build)
            tbl_list.append(version)
            for tst_name, tst_data in data[job][build].iteritems():
                # Only failed tests are listed; the NIC prefix is taken
                # from the parent suite name.
                if tst_data["status"] != "FAIL":
                groups = re.search(REGEX_NIC, tst_data["parent"])
                nic = groups.group(0)
                tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info(" Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        for test in tbl_list:
            file_handler.write(test + '\n')
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Produces a CSV (and pretty txt) table of tests which failed within the
    configured time window, with the number of failures and details of the
    last failure.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # Only builds generated within this window (in days, default 7) count.
    timeperiod = timedelta(int(table.get("window", 7)))

    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                if tbl_dict.get(tst_name, None) is None:
                    # First sighting of this test: derive the NIC from the
                    # parent suite name and register the test.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                    generated = input_data.metadata(job, build).\
                        get("generated", "")
                    # NOTE(review): naive datetimes compared; assumes
                    # "generated" and 'now' share the same timezone — confirm.
                    then = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name]["data"][build] = (
                            input_data.metadata(job, build).get("version", ""),
                except (TypeError, KeyError) as err:
                    logging.warning("tst_name: {} - err: {}".
                                    format(tst_name, repr(err)))

    for tst_data in tbl_dict.values():
        for val in tst_data["data"].values():
            if val[0] == "FAIL":
                # Remember details of the most recent failure seen.
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        max_fails = fails_nr if fails_nr > max_fails else max_fails
        tbl_lst.append([tst_data["name"],
                        "mrr-daily-build-{0}".format(fails_last_csit)])

    # Presumably sorts by the last-failure timestamp column — confirm.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)

    # Group rows by failure count (item[1]), most failures first.
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info(" Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    # Also emit a human-readable txt version of the CSV table.
    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info(" Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the failed-tests CSV named by table["input-file"] and renders it
    as an HTML table wrapped in a reST ".. raw:: html" directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
        # The testbed name is needed to build links to trending plots.
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the whole input CSV into a list of rows.
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
        logging.warning("The input file is not defined.")
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))

    # Table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header: built from the first CSV row; only the first column is
    # left-aligned.
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))

    # Rows: alternate between two background shades.
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
                # The test-name cell links to its trending plot.
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))

        # Emit the table as raw HTML inside a reST directive.
        with open(table["output-file"], 'w') as html_file:
            logging.info(" Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
        logging.warning("The output file is not defined.")