# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Algorithms to generate tables.
"""
import logging
import csv
import re

from collections import OrderedDict
from datetime import datetime as dt
from datetime import timedelta
from string import replace
from xml.etree import ElementTree as ET

from numpy import nan, isnan

from utils import mean, stdev, relative_change, classify_anomalies, \
    convert_csv_to_pretty_txt
# Extracts the NIC tag from a test's parent suite name (e.g. "10ge2p1x520"
# in "...-10ge2p1x520-..."); used to prefix test names in dashboard tables.
REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table item names its generator in table["algorithm"]; the named
    module-level function is looked up and called with (table, data).

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        try:
            # NOTE(review): eval() executes a name taken from the
            # specification file; this is safe only while the specification
            # is trusted, locally controlled input.
            eval(table["algorithm"])(table, data)
        except NameError as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
    logging.info("Done.")
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    One CSV file is written per suite; each row holds the columns listed in
    table["columns"], each value CSV-quoted (embedded '"' doubled).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification; only the first job/build pair is used.
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("    No data available. The table will not be "
                      "generated.")
        return

    for suite_longname, suite in suites.iteritems():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # column["data"] is e.g. "data <key>"; the second
                        # token selects the field from the test record.
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # str.replace with count=1 replaces the Py2-only
                            # string.replace(..., maxreplace=1) helper.
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Like table_details, but test data from all builds is merged into one
    data set before the per-suite CSV files are written.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.iteritems():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # column["data"] is e.g. "data <key>"; the second
                        # token selects the field from the test record.
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # str.replace with count=1 replaces the Py2-only
                            # string.replace(..., maxreplace=1) helper.
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info("      Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
def _tpc_modify_test_name(test_name):
    """Normalize a test name for cross-build/testbed comparison: strip the
    rate-type suffixes (-ndrpdrdisc, -ndrpdr, ...) and unify thread/core
    tags to a core-count-only form (e.g. "2t1c" -> "1c").

    :param test_name: Original test name.
    :type test_name: str
    :returns: Normalized test name.
    :rtype: str
    """
    # The replacement order matters: longer suffixes must go first.
    return test_name.replace("-ndrpdrdisc", "").\
        replace("-ndrpdr", "").replace("-pdrdisc", "").\
        replace("-ndrdisc", "").replace("-pdr", "").\
        replace("-ndr", "").\
        replace("1t1c", "1c").replace("2t1c", "1c").\
        replace("2t2c", "2c").replace("4t2c", "2c").\
        replace("4t4c", "4c").replace("8t4c", "4c")


def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Builds a CSV comparing mean/stdev throughput (or receive rate for MRR)
    between a "reference" and a "compare" data set, with optional extra
    "history" columns, plus the relative change in percent.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Display name: suite prefix + test name without the
                    # trailing rate-type token.
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["ref-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["cmp-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                except (KeyError, TypeError):
                    # Test missing in reference data or has no usable
                    # result: drop it from the comparison.
                    tbl_dict.pop(tst_name_mod, None)

    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in \
                            data[job][str(build)].iteritems():
                        tst_name_mod = _tpc_modify_test_name(tst_name)
                        if "across topologies" in table["title"].lower():
                            tst_name_mod = tst_name_mod.replace("2n1l-", "")
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history",
                                                      None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].\
                                get(item["title"], None) is None:
                            tbl_dict[tst_name_mod]["history"][
                                item["title"]] = list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][
                                    item["title"]].append(
                                        tst_data["result"][
                                            "receive-rate"].avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].append(
                                            tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].append(
                                            tst_data["throughput"][
                                                "PDR"]["LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].append(
                                            tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].append(
                                            tst_data["throughput"][
                                                "NDR"]["LOWER"])
                        except (TypeError, KeyError):
                            pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # item[-4] is the reference mean, item[-2] the compare mean.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]),
                                            float(item[-2]))))
        # Keep only complete rows (rows with the delta computed).
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    For every test with at least two samples, classifies anomalies and
    reports the latest trend, short/long-term relative change and the
    number of regressions/progressions in the short-term window.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        # A trend needs at least two samples.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            if (isnan(last_avg) or
                    isnan(rel_change_last) or
                    isnan(rel_change_long)):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Sort by number of regressions, then progressions (descending),
    # then by short-term change within each bucket.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
535 def _generate_url(base, testbed, test_name):
536 """Generate URL to a trending plot from the name of the test case.
538 :param base: The base part of URL common to all test cases.
539 :param testbed: The testbed used for testing.
540 :param test_name: The name of the test case.
544 :returns: The URL to the plot with the trending data for the given test
554 if "lbdpdk" in test_name or "lbvpp" in test_name:
555 file_name = "link_bonding"
557 elif "114b" in test_name and "vhost" in test_name:
560 elif "testpmd" in test_name or "l3fwd" in test_name:
563 elif "memif" in test_name:
564 file_name = "container_memif"
567 elif "srv6" in test_name:
570 elif "vhost" in test_name:
571 if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
572 file_name = "vm_vhost_l2"
573 if "114b" in test_name:
575 elif "l2xcbase" in test_name:
576 feature = "-base-l2xc"
577 elif "l2bdbasemaclrn" in test_name:
578 feature = "-base-l2bd"
581 elif "ip4base" in test_name:
582 file_name = "vm_vhost_ip4"
585 elif "ipsec" in test_name:
587 feature = "-base-scale"
589 elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
590 file_name = "ip4_tunnels"
593 elif "ip4base" in test_name or "ip4scale" in test_name:
595 if "xl710" in test_name:
596 feature = "-base-scale-features"
597 elif "iacl" in test_name:
598 feature = "-features-iacl"
599 elif "oacl" in test_name:
600 feature = "-features-oacl"
601 elif "snat" in test_name or "cop" in test_name:
602 feature = "-features"
604 feature = "-base-scale"
606 elif "ip6base" in test_name or "ip6scale" in test_name:
608 feature = "-base-scale"
610 elif "l2xcbase" in test_name or "l2xcscale" in test_name \
611 or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
612 or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
614 if "macip" in test_name:
615 feature = "-features-macip"
616 elif "iacl" in test_name:
617 feature = "-features-iacl"
618 elif "oacl" in test_name:
619 feature = "-features-oacl"
621 feature = "-base-scale"
623 if "x520" in test_name:
625 elif "x710" in test_name:
627 elif "xl710" in test_name:
629 elif "xxv710" in test_name:
631 elif "vic1227" in test_name:
633 elif "vic1385" in test_name:
639 if "64b" in test_name:
641 elif "78b" in test_name:
643 elif "imix" in test_name:
645 elif "9000b" in test_name:
647 elif "1518b" in test_name:
649 elif "114b" in test_name:
653 anchor += framesize + '-'
655 if "1t1c" in test_name:
657 elif "2t2c" in test_name:
659 elif "4t4c" in test_name:
661 elif "2t1c" in test_name:
663 elif "4t2c" in test_name:
665 elif "8t4c" in test_name:
668 return url + file_name + '-' + testbed + '-' + nic + framesize + feature + \
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the CSV dashboard produced earlier, renders it as an HTML table
    (rows colored by regression/progression state) and writes it wrapped in
    an rST "raw:: html" directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows:
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4/5 hold the regression/progression counts.
        if int(row[4]):
            color = "regression"
        elif int(row[5]):
            color = "progression"
        else:
            color = "normal"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name:
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("  Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Collects test failures within the configured time window (default 7
    days) and writes a CSV sorted by failure count with the details of the
    most recent failure.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Failures [#]",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # specification
    tbl_dict = dict()

    # Only failures newer than (now - window days) are counted.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get("window", 7)))

    for job, builds in table["data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    generated = input_data.metadata(job, build).\
                        get("generated", "")
                    if not generated:
                        continue
                    then = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name]["data"][build] = (
                            tst_data["status"],
                            generated,
                            input_data.metadata(job, build).get("version", ""),
                            build)
                except (TypeError, KeyError) as err:
                    logging.warning("tst_name: {} - err: {}".
                                    format(tst_name, repr(err)))

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        for val in tst_data["data"].values():
            if val[0] == "FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append([tst_data["name"],
                            fails_nr,
                            fails_last_date,
                            fails_last_vpp,
                            "mrr-daily-build-{0}".format(fails_last_csit)])

    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the failed-tests CSV produced earlier, renders it as an HTML
    table with alternating row colors and writes it wrapped in an rST
    "raw:: html" directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    testbed = table.get("testbed", None)
    if testbed is None:
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))
        return

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    try:
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    except KeyError:
        logging.warning("The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
        return

    # Table:
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
        th.text = item

    # Rows:
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Name:
            if c_idx == 0:
                url = _generate_url("../trending/", testbed, item)
                ref = ET.SubElement(td, "a", attrib=dict(href=url))
                ref.text = item
            else:
                td.text = item
    try:
        with open(table["output-file"], 'w') as html_file:
            logging.info("  Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning("The output file is not defined.")
        return