1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30 convert_csv_to_pretty_txt
# Matches a NIC-model substring (e.g. "10ge2p1x520") inside a test's parent
# (suite) name; used below to prefix dashboard test names with the NIC.
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
36 def generate_tables(spec, data):
37     """Generate all tables specified in the specification file.
# NOTE(review): original line numbering is non-contiguous here -- some lines
# (e.g. ":type data:", the docstring terminator, a "try:" header) are elided
# from this view; confirm against the full file.
39     :param spec: Specification read from the specification file.
40     :param data: Data to process.
41     :type spec: Specification
45     logging.info("Generating the tables ...")
46     for table in spec.tables:
# The "except NameError" below implies each algorithm call is wrapped in its
# own try (original line 47, missing from this view), so one failing table
# does not abort generation of the remaining tables.
# HACK: eval() dispatches to a function named in the specification file.
# Safe only while the specification is trusted input; a dict mapping
# algorithm names to callables would avoid arbitrary-code evaluation.
48             eval(table["algorithm"])(table, data)
49         except NameError as err:
50             logging.error("Probably algorithm '{alg}' is not defined: {err}".
51                           format(alg=table["algorithm"], err=repr(err)))
55 def table_details(table, input_data):
56     """Generate the table(s) with algorithm: table_detailed_test_results
57     specified in the specification file.
# NOTE(review): lines are elided from this view (docstring close, the
# "header = list()" / "row_lst = list()" initializations, try/except
# headers); comments below are hedged accordingly.
59     :param table: Table to generate.
60     :param input_data: Data to process.
61     :type table: pandas.Series
62     :type input_data: InputData
65     logging.info("  Generating the table {0} ...".
66                  format(table.get("title", "")))
69     logging.info("    Creating the data set for the {0} '{1}'.".
70                  format(table.get("type", ""), table.get("title", "")))
71     data = input_data.filter_data(table)
73     # Prepare the header of the tables
# Column titles are CSV-quoted; embedded double quotes are escaped by
# doubling per RFC 4180 convention.
75     for column in table["columns"]:
76         header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
78     # Generate the data for the table according to the model in the table
# Python 2 idiom: dict.keys() is a list, so [0] takes the first job;
# presumably the spec provides exactly one job/build pair here -- confirm.
80     job = table["data"].keys()[0]
81     build = str(table["data"][job][0])
83         suites = input_data.suites(job, build)
85         logging.error("  No data available. The table will not be generated.")
# One output CSV file is written per suite.
88     for suite_longname, suite in suites.iteritems():
90         suite_name = suite["name"]
92         for test in data[job][build].keys():
93             if data[job][build][test]["parent"] in suite_name:
95                 for column in table["columns"]:
# column["data"] looks like "<source> <field>"; the second token is the key
# into the per-test record.
97                         col_data = str(data[job][build][test][column["data"].
98                                        split(" ")[1]]).replace('"', '""')
99                         if column["data"].split(" ")[1] in ("vat-history",
# string.replace() (function form, removed in Python 3) strips " |br| "
# markers; [:-5] then trims a trailing formatting remnant -- presumably the
# tail of the last "|br|" marker, verify the exact length in the full file.
101                             col_data = replace(col_data, " |br| ", "",
103                             col_data = " |prein| {0} |preout| ".\
104                                 format(col_data[:-5])
105                         row_lst.append('"{0}"'.format(col_data))
107                         row_lst.append("No data")
108                 table_lst.append(row_lst)
110         # Write the data to file
112             file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
113                                             table["output-file-ext"])
114             logging.info("      Writing file: '{}'".format(file_name))
115             with open(file_name, "w") as file_handler:
116                 file_handler.write(",".join(header) + "\n")
117                 for item in table_lst:
118                     file_handler.write(",".join(item) + "\n")
120     logging.info("  Done.")
123 def table_merged_details(table, input_data):
124     """Generate the table(s) with algorithm: table_merged_details
125     specified in the specification file.
# NOTE(review): lines are elided from this view (docstring close, list
# initializations, try/except headers); comments are hedged accordingly.
127     :param table: Table to generate.
128     :param input_data: Data to process.
129     :type table: pandas.Series
130     :type input_data: InputData
133     logging.info("  Generating the table {0} ...".
134                  format(table.get("title", "")))
# Unlike table_details(), data from all jobs/builds is merged first, so the
# loops below index by test name only (no job/build keys).
137     logging.info("    Creating the data set for the {0} '{1}'.".
138                  format(table.get("type", ""), table.get("title", "")))
139     data = input_data.filter_data(table)
140     data = input_data.merge_data(data)
141     data.sort_index(inplace=True)
143     logging.info("    Creating the data set for the {0} '{1}'.".
144                  format(table.get("type", ""), table.get("title", "")))
145     suites = input_data.filter_data(table, data_set="suites")
146     suites = input_data.merge_data(suites)
148     # Prepare the header of the tables
150     for column in table["columns"]:
151         header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
# One output CSV per suite; row construction mirrors table_details().
153     for _, suite in suites.iteritems():
155         suite_name = suite["name"]
157         for test in data.keys():
158             if data[test]["parent"] in suite_name:
160                 for column in table["columns"]:
162                         col_data = str(data[test][column["data"].
163                                        split(" ")[1]]).replace('"', '""')
164                         if column["data"].split(" ")[1] in ("vat-history",
# Python 2 string.replace() function form; strips " |br| " markers and trims
# a trailing remnant -- verify the [:-5] length against the full file.
166                             col_data = replace(col_data, " |br| ", "",
168                             col_data = " |prein| {0} |preout| ".\
169                                 format(col_data[:-5])
170                         row_lst.append('"{0}"'.format(col_data))
172                         row_lst.append("No data")
173                 table_lst.append(row_lst)
175         # Write the data to file
177             file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
178                                             table["output-file-ext"])
179             logging.info("      Writing file: '{}'".format(file_name))
180             with open(file_name, "w") as file_handler:
181                 file_handler.write(",".join(header) + "\n")
182                 for item in table_lst:
183                     file_handler.write(",".join(item) + "\n")
185     logging.info("  Done.")
188 def table_performance_comparison(table, input_data):
189     """Generate the table(s) with algorithm: table_performance_comparison
190     specified in the specification file.
# NOTE(review): this block has many elided lines (try headers, "else:"
# branches, tbl_dict initialization, the header "if history:" loop, etc.);
# the comments below describe only what the visible lines establish.
192     :param table: Table to generate.
193     :param input_data: Data to process.
194     :type table: pandas.Series
195     :type input_data: InputData
198     logging.info("  Generating the table {0} ...".
199                  format(table.get("title", "")))
202     logging.info("    Creating the data set for the {0} '{1}'.".
203                  format(table.get("type", ""), table.get("title", "")))
204     data = input_data.filter_data(table, continue_on_error=True)
206     # Prepare the header of the tables
208         header = ["Test case", ]
210         if table["include-tests"] == "MRR":
211             hdr_param = "Receive Rate"
213             hdr_param = "Throughput"
# Optional "history" entries each add a mean/stdev column pair before the
# reference/compare column pairs.
215         history = table.get("history", None)
219                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
220                      "{0} Stdev [Mpps]".format(item["title"])])
222             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
223              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
224              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
225              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
227         header_str = ",".join(header) + "\n"
228     except (AttributeError, KeyError) as err:
229         logging.error("The model is invalid, missing parameter: {0}".
233     # Prepare data to the table:
# Pass 1: collect reference samples keyed by a normalized test name (test
# type suffixes removed, thread/core tags collapsed to core counts).
235     for job, builds in table["reference"]["data"].items():
237             for tst_name, tst_data in data[job][str(build)].iteritems():
238                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
239                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
240                     replace("-ndrdisc", "").replace("-pdr", "").\
241                     replace("-ndr", "").\
242                     replace("1t1c", "1c").replace("2t1c", "1c").\
243                     replace("2t2c", "2c").replace("4t2c", "2c").\
244                     replace("4t4c", "4c").replace("8t4c", "4c")
245                 if "across topologies" in table["title"].lower():
246                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
247                 if tbl_dict.get(tst_name_mod, None) is None:
248                     name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
249                                             "-".join(tst_data["name"].
251                     if "across testbeds" in table["title"].lower() or \
252                             "across topologies" in table["title"].lower():
254                             replace("1t1c", "1c").replace("2t1c", "1c").\
255                             replace("2t2c", "2c").replace("4t2c", "2c").\
256                             replace("4t4c", "4c").replace("8t4c", "4c")
257                     tbl_dict[tst_name_mod] = {"name": name,
261                     # TODO: Re-work when NDRPDRDISC tests are not used
262                     if table["include-tests"] == "MRR":
263                         tbl_dict[tst_name_mod]["ref-data"]. \
264                             append(tst_data["result"]["receive-rate"].avg)
265                     elif table["include-tests"] == "PDR":
266                         if tst_data["type"] == "PDR":
267                             tbl_dict[tst_name_mod]["ref-data"]. \
268                                 append(tst_data["throughput"]["value"])
269                         elif tst_data["type"] == "NDRPDR":
270                             tbl_dict[tst_name_mod]["ref-data"].append(
271                                 tst_data["throughput"]["PDR"]["LOWER"])
272                     elif table["include-tests"] == "NDR":
273                         if tst_data["type"] == "NDR":
274                             tbl_dict[tst_name_mod]["ref-data"]. \
275                                 append(tst_data["throughput"]["value"])
276                         elif tst_data["type"] == "NDRPDR":
277                             tbl_dict[tst_name_mod]["ref-data"].append(
278                                 tst_data["throughput"]["NDR"]["LOWER"])
282                     pass  # No data in output.xml for this test
# Pass 2: collect compare samples; tests with no reference entry are
# dropped from the table (the pop below).
284     for job, builds in table["compare"]["data"].items():
286             for tst_name, tst_data in data[job][str(build)].iteritems():
287                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
288                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
289                     replace("-ndrdisc", "").replace("-pdr", ""). \
290                     replace("-ndr", "").\
291                     replace("1t1c", "1c").replace("2t1c", "1c").\
292                     replace("2t2c", "2c").replace("4t2c", "2c").\
293                     replace("4t4c", "4c").replace("8t4c", "4c")
294                 if "across topologies" in table["title"].lower():
295                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
297                     # TODO: Re-work when NDRPDRDISC tests are not used
298                     if table["include-tests"] == "MRR":
299                         tbl_dict[tst_name_mod]["cmp-data"]. \
300                             append(tst_data["result"]["receive-rate"].avg)
301                     elif table["include-tests"] == "PDR":
302                         if tst_data["type"] == "PDR":
303                             tbl_dict[tst_name_mod]["cmp-data"]. \
304                                 append(tst_data["throughput"]["value"])
305                         elif tst_data["type"] == "NDRPDR":
306                             tbl_dict[tst_name_mod]["cmp-data"].append(
307                                 tst_data["throughput"]["PDR"]["LOWER"])
308                     elif table["include-tests"] == "NDR":
309                         if tst_data["type"] == "NDR":
310                             tbl_dict[tst_name_mod]["cmp-data"]. \
311                                 append(tst_data["throughput"]["value"])
312                         elif tst_data["type"] == "NDRPDR":
313                             tbl_dict[tst_name_mod]["cmp-data"].append(
314                                 tst_data["throughput"]["NDR"]["LOWER"])
320                     tbl_dict.pop(tst_name_mod, None)
# Pass 3 (optional): collect historical samples per "history" entry.
323         for job, builds in item["data"].items():
325                 for tst_name, tst_data in data[job][str(build)].iteritems():
326                     tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
327                         replace("-ndrpdr", "").replace("-pdrdisc", ""). \
328                         replace("-ndrdisc", "").replace("-pdr", ""). \
329                         replace("-ndr", "").\
330                         replace("1t1c", "1c").replace("2t1c", "1c").\
331                         replace("2t2c", "2c").replace("4t2c", "2c").\
332                         replace("4t4c", "4c").replace("8t4c", "4c")
333                     if "across topologies" in table["title"].lower():
334                         tst_name_mod = tst_name_mod.replace("2n1l-", "")
335                     if tbl_dict.get(tst_name_mod, None) is None:
337                     if tbl_dict[tst_name_mod].get("history", None) is None:
338                         tbl_dict[tst_name_mod]["history"] = OrderedDict()
339                     if tbl_dict[tst_name_mod]["history"].get(item["title"],
341                         tbl_dict[tst_name_mod]["history"][item["title"]] = \
344                         # TODO: Re-work when NDRPDRDISC tests are not used
345                         if table["include-tests"] == "MRR":
346                             tbl_dict[tst_name_mod]["history"][item["title"
347                                 ]].append(tst_data["result"]["receive-rate"].
349                         elif table["include-tests"] == "PDR":
350                             if tst_data["type"] == "PDR":
351                                 tbl_dict[tst_name_mod]["history"][
353                                     append(tst_data["throughput"]["value"])
354                             elif tst_data["type"] == "NDRPDR":
355                                 tbl_dict[tst_name_mod]["history"][item[
356                                     "title"]].append(tst_data["throughput"][
358                         elif table["include-tests"] == "NDR":
359                             if tst_data["type"] == "NDR":
360                                 tbl_dict[tst_name_mod]["history"][
362                                     append(tst_data["throughput"]["value"])
363                             elif tst_data["type"] == "NDRPDR":
364                                 tbl_dict[tst_name_mod]["history"][item[
365                                     "title"]].append(tst_data["throughput"][
369                     except (TypeError, KeyError):
# Build table rows: mean/stdev pairs converted from pps to Mpps (/ 1e6),
# then the reference-vs-compare relative change [%] appended last.
373     for tst_name in tbl_dict.keys():
374         item = [tbl_dict[tst_name]["name"], ]
376         if tbl_dict[tst_name].get("history", None) is not None:
377             for hist_data in tbl_dict[tst_name]["history"].values():
379                     item.append(round(mean(hist_data) / 1000000, 2))
380                     item.append(round(stdev(hist_data) / 1000000, 2))
382                     item.extend([None, None])
384                 item.extend([None, None])
385         data_t = tbl_dict[tst_name]["ref-data"]
387             item.append(round(mean(data_t) / 1000000, 2))
388             item.append(round(stdev(data_t) / 1000000, 2))
390             item.extend([None, None])
391         data_t = tbl_dict[tst_name]["cmp-data"]
393             item.append(round(mean(data_t) / 1000000, 2))
394             item.append(round(stdev(data_t) / 1000000, 2))
396             item.extend([None, None])
# item[-4] is the reference mean, item[-2] the compare mean; the zero/None
# guard avoids division errors in relative_change().
397         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
398             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
399         if len(item) == len(header):
402     # Sort the table according to the relative change
403     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
405     # Generate csv tables:
406     csv_file = "{0}.csv".format(table["output-file"])
407     with open(csv_file, "w") as file_handler:
408         file_handler.write(header_str)
410             file_handler.write(",".join([str(item) for item in test]) + "\n")
412     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
415 def table_performance_trending_dashboard(table, input_data):
416     """Generate the table(s) with algorithm:
417     table_performance_trending_dashboard
418     specified in the specification file.
# NOTE(review): several lines are elided from this view (header middle
# columns, try headers, last_avg / max_long_avg assignments, continue
# statements); comments below are hedged accordingly.
420     :param table: Table to generate.
421     :param input_data: Data to process.
422     :type table: pandas.Series
423     :type input_data: InputData
426     logging.info("  Generating the table {0} ...".
427                  format(table.get("title", "")))
430     logging.info("    Creating the data set for the {0} '{1}'.".
431                  format(table.get("type", ""), table.get("title", "")))
432     data = input_data.filter_data(table, continue_on_error=True)
434     # Prepare the header of the tables
435     header = ["Test Case",
437               "Short-Term Change [%]",
438               "Long-Term Change [%]",
442     header_str = ",".join(header) + "\n"
444     # Prepare data to the table:
# Collect per-build receive-rate samples keyed by test name; tests on the
# spec's ignore-list are skipped.
446     for job, builds in table["data"].items():
448             for tst_name, tst_data in data[job][str(build)].iteritems():
449                 if tst_name.lower() in table["ignore-list"]:
451                 if tbl_dict.get(tst_name, None) is None:
452                     groups = re.search(REGEX_NIC, tst_data["parent"])
455                     nic = groups.group(0)
456                     tbl_dict[tst_name] = {
457                         "name": "{0}-{1}".format(nic, tst_data["name"]),
458                         "data": OrderedDict()}
460                     tbl_dict[tst_name]["data"][str(build)] = \
461                         tst_data["result"]["receive-rate"]
462                 except (TypeError, KeyError):
463                     pass  # No data in output.xml for this test
# Classify each test's sample series into anomalies and compute short-term
# (last vs ~window ago) and long-term (last vs long-window max) changes.
466     for tst_name in tbl_dict.keys():
467         data_t = tbl_dict[tst_name]["data"]
471         classification_lst, avgs = classify_anomalies(data_t)
473         win_size = min(len(data_t), table["window"])
474         long_win_size = min(len(data_t), table["long-trend-window"])
478                 [x for x in avgs[-long_win_size:-win_size]
483         avg_week_ago = avgs[max(-win_size, -len(avgs))]
# NaN/zero guards: rel change is undefined without a valid baseline.
485         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
486             rel_change_last = nan
488             rel_change_last = round(
489                 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
491         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
492             rel_change_long = nan
494             rel_change_long = round(
495                 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
497         if classification_lst:
498             if isnan(rel_change_last) and isnan(rel_change_long):
500             if (isnan(last_avg) or
501                     isnan(rel_change_last) or
502                     isnan(rel_change_long)):
# Row: name, last trend [Mpps], changes, then regression/progression counts
# within the short-term window.
505                 [tbl_dict[tst_name]["name"],
506                  round(last_avg / 1000000, 2),
509                  classification_lst[-win_size:].count("regression"),
510                  classification_lst[-win_size:].count("progression")])
512     tbl_lst.sort(key=lambda rel: rel[0])
# Stable re-ordering: most regressions first, then most progressions, then
# by short-term change within each bucket.
515     for nrr in range(table["window"], -1, -1):
516         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
517         for nrp in range(table["window"], -1, -1):
518             tbl_out = [item for item in tbl_reg if item[5] == nrp]
519             tbl_out.sort(key=lambda rel: rel[2])
520             tbl_sorted.extend(tbl_out)
522     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
524     logging.info("    Writing file: '{0}'".format(file_name))
525     with open(file_name, "w") as file_handler:
526         file_handler.write(header_str)
527         for test in tbl_sorted:
528             file_handler.write(",".join([str(item) for item in test]) + '\n')
530     txt_file_name = "{0}.txt".format(table["output-file"])
531     logging.info("    Writing file: '{0}'".format(txt_file_name))
532     convert_csv_to_pretty_txt(file_name, txt_file_name)
535 def _generate_url(base, testbed, test_name):
536     """Generate URL to a trending plot from the name of the test case.
# NOTE(review): many branch bodies are elided from this view (most
# file_name/feature/nic/framesize/anchor/cores assignments, plus the
# variable initializations and the parameter/return type lines); the
# visible structure is a keyword-driven mapping from test-name substrings
# to URL components.
538     :param base: The base part of URL common to all test cases.
539     :param testbed: The testbed used for testing.
540     :param test_name: The name of the test case.
544     :returns: The URL to the plot with the trending data for the given test
# Map the test name to a plot page (file_name) and a feature suffix.
554     if "lbdpdk" in test_name or "lbvpp" in test_name:
555         file_name = "link_bonding"
557     elif "114b" in test_name and "vhost" in test_name:
560     elif "testpmd" in test_name or "l3fwd" in test_name:
563     elif "memif" in test_name:
564         file_name = "container_memif"
567     elif "srv6" in test_name:
570     elif "vhost" in test_name:
571         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
572             file_name = "vm_vhost_l2"
573             if "114b" in test_name:
575             elif "l2xcbase" in test_name:
576                 feature = "-base-l2xc"
577             elif "l2bdbasemaclrn" in test_name:
578                 feature = "-base-l2bd"
581         elif "ip4base" in test_name:
582             file_name = "vm_vhost_ip4"
585     elif "ipsec" in test_name:
587         feature = "-base-scale"
589     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
590         file_name = "ip4_tunnels"
593     elif "ip4base" in test_name or "ip4scale" in test_name:
595         if "xl710" in test_name:
596             feature = "-base-scale-features"
597         elif "iacl" in test_name:
598             feature = "-features-iacl"
599         elif "oacl" in test_name:
600             feature = "-features-oacl"
601         elif "snat" in test_name or "cop" in test_name:
602             feature = "-features"
604             feature = "-base-scale"
606     elif "ip6base" in test_name or "ip6scale" in test_name:
608         feature = "-base-scale"
# NOTE(review): "l2dbbasemaclrn"/"l2dbscale" look like typos for
# "l2bd..." but may intentionally match historic test names -- confirm
# before changing.
610     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
611             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
612             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
614         if "macip" in test_name:
615             feature = "-features-macip"
616         elif "iacl" in test_name:
617             feature = "-features-iacl"
618         elif "oacl" in test_name:
619             feature = "-features-oacl"
621             feature = "-base-scale"
# NIC model component (assignments elided from this view).
623     if "x520" in test_name:
625     elif "x710" in test_name:
627     elif "xl710" in test_name:
629     elif "xxv710" in test_name:
# Frame-size component (assignments elided from this view).
635     if "64b" in test_name:
637     elif "78b" in test_name:
639     elif "imix" in test_name:
641     elif "9000b" in test_name:
643     elif "1518b" in test_name:
645     elif "114b" in test_name:
649     anchor += framesize + '-'
# Cores/threads anchor component (assignments elided from this view).
651     if "1t1c" in test_name:
653     elif "2t2c" in test_name:
655     elif "4t4c" in test_name:
657     elif "2t1c" in test_name:
659     elif "4t2c" in test_name:
661     elif "8t4c" in test_name:
664     return url + file_name + '-' + testbed + '-' + nic + framesize + feature + \
668 def table_performance_trending_dashboard_html(table, input_data):
669     """Generate the table(s) with algorithm:
670     table_performance_trending_dashboard_html specified in the specification
# NOTE(review): elided lines include the early "return" after the testbed
# error, try/except headers, the regression/normal color branches, and the
# cells that attach text to th/td/ref elements; comments are hedged.
673     :param table: Table to generate.
674     :param input_data: Data to process.
676     :type input_data: InputData
# The testbed name is required to build per-test trending-plot URLs.
679     testbed = table.get("testbed", None)
681         logging.error("The testbed is not defined for the table '{0}'.".
682                       format(table.get("title", "")))
685     logging.info("  Generating the table {0} ...".
686                  format(table.get("title", "")))
# Read the previously generated dashboard CSV ('rb' mode: Python 2 csv).
689         with open(table["input-file"], 'rb') as csv_file:
690             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
691             csv_lst = [item for item in csv_content]
693         logging.warning("The input file is not defined.")
695     except csv.Error as err:
696         logging.warning("Not possible to process the file '{0}'.\n{1}".
697                         format(table["input-file"], err))
# Build the HTML table: header row from the CSV header line.
701     dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
704     tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
705     for idx, item in enumerate(csv_lst[0]):
706         alignment = "left" if idx == 0 else "center"
707         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Row shading: two alternating shades per classification (regression /
# progression / normal); selection logic partially elided from this view.
711     colors = {"regression": ("#ffcccc", "#ff9999"),
712               "progression": ("#c6ecc6", "#9fdf9f"),
713               "normal": ("#e9f1fb", "#d4e4f7")}
714     for r_idx, row in enumerate(csv_lst[1:]):
718             color = "progression"
721         background = colors[color][r_idx % 2]
722         tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
725         for c_idx, item in enumerate(row):
726             alignment = "left" if c_idx == 0 else "center"
727             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# First column cells become links to the matching trending plot.
730                 url = _generate_url("../trending/", testbed, item)
731                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Output is reST with an embedded raw-HTML directive (tab-indented body).
736         with open(table["output-file"], 'w') as html_file:
737             logging.info("      Writing file: '{0}'".format(table["output-file"]))
738             html_file.write(".. raw:: html\n\n\t")
739             html_file.write(ET.tostring(dashboard))
740             html_file.write("\n\t<p><br><br></p>\n")
742         logging.warning("The output file is not defined.")
746 def table_failed_tests(table, input_data):
747     """Generate the table(s) with algorithm: table_failed_tests
748     specified in the specification file.
# NOTE(review): elided lines include list/counter initializations, "now ="
# timestamp, try headers, "continue" statements and parts of the appended
# tuples/rows; comments below are hedged accordingly.
750     :param table: Table to generate.
751     :param input_data: Data to process.
752     :type table: pandas.Series
753     :type input_data: InputData
756     logging.info("  Generating the table {0} ...".
757                  format(table.get("title", "")))
760     logging.info("    Creating the data set for the {0} '{1}'.".
761                  format(table.get("type", ""), table.get("title", "")))
762     data = input_data.filter_data(table, continue_on_error=True)
764     # Prepare the header of the tables
765     header = ["Test Case",
767               "Last Failure [Time]",
768               "Last Failure [VPP-Build-Id]",
769               "Last Failure [CSIT-Job-Build-Id]"]
771     # Generate the data for the table according to the model in the table
# Only builds generated within the last "window" days (default 7) count.
775     timeperiod = timedelta(int(table.get("window", 7)))
778     for job, builds in table["data"].items():
781             for tst_name, tst_data in data[job][build].iteritems():
782                 if tst_name.lower() in table["ignore-list"]:
784                 if tbl_dict.get(tst_name, None) is None:
785                     groups = re.search(REGEX_NIC, tst_data["parent"])
788                     nic = groups.group(0)
789                     tbl_dict[tst_name] = {
790                         "name": "{0}-{1}".format(nic, tst_data["name"]),
791                         "data": OrderedDict()}
# Build generation timestamp comes from the job metadata; format fixed.
793                     generated = input_data.metadata(job, build).\
797                     then = dt.strptime(generated, "%Y%m%d %H:%M")
798                     if (now - then) <= timeperiod:
799                         tbl_dict[tst_name]["data"][build] = (
802                             input_data.metadata(job, build).get("version", ""),
804                 except (TypeError, KeyError) as err:
805                     logging.warning("tst_name: {} - err: {}".
806                                     format(tst_name, repr(err)))
# Count failures per test and remember the latest failure's date / VPP
# build / CSIT build (val layout presumed (status, date, vpp, csit) --
# confirm against the elided append above).
810     for tst_data in tbl_dict.values():
812         for val in tst_data["data"].values():
815                 fails_last_date = val[1]
816                 fails_last_vpp = val[2]
817                 fails_last_csit = val[3]
819             max_fails = fails_nr if fails_nr > max_fails else max_fails
820             tbl_lst.append([tst_data["name"],
824                             "mrr-daily-build-{0}".format(fails_last_csit)])
# Newest failures first, then bucketed by descending failure count.
826     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
828     for nrf in range(max_fails, -1, -1):
829         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
830         tbl_sorted.extend(tbl_fails)
831     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
833     logging.info("    Writing file: '{0}'".format(file_name))
834     with open(file_name, "w") as file_handler:
835         file_handler.write(",".join(header) + "\n")
836         for test in tbl_sorted:
837             file_handler.write(",".join([str(item) for item in test]) + '\n')
839     txt_file_name = "{0}.txt".format(table["output-file"])
840     logging.info("    Writing file: '{0}'".format(txt_file_name))
841     convert_csv_to_pretty_txt(file_name, txt_file_name)
844 def table_failed_tests_html(table, input_data):
845     """Generate the table(s) with algorithm: table_failed_tests_html
846     specified in the specification file.
# NOTE(review): structure mirrors table_performance_trending_dashboard_html
# but with a single alternating color pair; several lines (early returns,
# try headers, cell-text assignments) are elided from this view.
848     :param table: Table to generate.
849     :param input_data: Data to process.
850     :type table: pandas.Series
851     :type input_data: InputData
# The testbed name is required to build per-test trending-plot URLs.
854     testbed = table.get("testbed", None)
856         logging.error("The testbed is not defined for the table '{0}'.".
857                       format(table.get("title", "")))
860     logging.info("  Generating the table {0} ...".
861                  format(table.get("title", "")))
# Read the previously generated failed-tests CSV ('rb': Python 2 csv).
864         with open(table["input-file"], 'rb') as csv_file:
865             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
866             csv_lst = [item for item in csv_content]
868         logging.warning("The input file is not defined.")
870     except csv.Error as err:
871         logging.warning("Not possible to process the file '{0}'.\n{1}".
872                         format(table["input-file"], err))
876     failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
# Header row from the CSV header line; first column left-aligned.
879     tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
880     for idx, item in enumerate(csv_lst[0]):
881         alignment = "left" if idx == 0 else "center"
882         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Zebra striping: two alternating row shades.
886     colors = ("#e9f1fb", "#d4e4f7")
887     for r_idx, row in enumerate(csv_lst[1:]):
888         background = colors[r_idx % 2]
889         tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
892         for c_idx, item in enumerate(row):
893             alignment = "left" if c_idx == 0 else "center"
894             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# First column cells become links to the matching trending plot.
897                 url = _generate_url("../trending/", testbed, item)
898                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Output is reST with an embedded raw-HTML directive (tab-indented body).
903         with open(table["output-file"], 'w') as html_file:
904             logging.info("      Writing file: '{0}'".format(table["output-file"]))
905             html_file.write(".. raw:: html\n\n\t")
906             html_file.write(ET.tostring(failed_tests))
907             html_file.write("\n\t<p><br><br></p>\n")
909         logging.warning("The output file is not defined.")