1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30 convert_csv_to_pretty_txt
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
36 def generate_tables(spec, data):
37     """Generate all tables specified in the specification file.
39     :param spec: Specification read from the specification file.
40     :param data: Data to process.
41     :type spec: Specification
45     logging.info("Generating the tables ...")
46     for table in spec.tables:
# NOTE(review): sampled excerpt -- the opening try: (original line 47) is not
# visible here.
# Dispatch by name: the spec's "algorithm" string selects one of the table_*
# generator functions defined in this module.  eval() on a spec-supplied name
# assumes the specification file is trusted input.
48             eval(table["algorithm"])(table, data)
# A NameError means the spec referenced an algorithm with no matching
# function; log it and continue with the remaining tables.
49         except NameError as err:
50             logging.error("Probably algorithm '{alg}' is not defined: {err}".
51                           format(alg=table["algorithm"], err=repr(err)))
55 def table_details(table, input_data):
56     """Generate the table(s) with algorithm: table_detailed_test_results
57     specified in the specification file.
59     :param table: Table to generate.
60     :param input_data: Data to process.
61     :type table: pandas.Series
62     :type input_data: InputData
65     logging.info("  Generating the table {0} ...".
66                  format(table.get("title", "")))
69     logging.info("    Creating the data set for the {0} '{1}'.".
70                  format(table.get("type", ""), table.get("title", "")))
71     data = input_data.filter_data(table)
73     # Prepare the header of the tables
# NOTE(review): sampled excerpt -- the header list initializer (original
# line 74) is not visible here.
# Column titles are CSV-quoted; embedded double quotes are escaped by
# doubling them, per CSV convention.
75     for column in table["columns"]:
76         header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
78     # Generate the data for the table according to the model in the table
# Only the first job and its first build are used for a detailed table.
# (Python 2 idiom: dict.keys() returns a list, so [0] indexing works.)
80     job = table["data"].keys()[0]
81     build = str(table["data"][job][0])
# NOTE(review): the try:/except frame around suites lookup (original
# lines 82-87) is only partially visible.
83         suites = input_data.suites(job, build)
85         logging.error("  No data available. The table will not be generated.")
# One output file is written per suite; rows are tests whose "parent"
# matches the suite name.
88     for suite_longname, suite in suites.iteritems():
90         suite_name = suite["name"]
92         for test in data[job][build].keys():
93             if data[job][build][test]["parent"] in suite_name:
95                 for column in table["columns"]:
# column["data"] looks like "<source> <key>"; the second token selects the
# field of the test record to render.
97                         col_data = str(data[job][build][test][column["data"].
98                                        split(" ")[1]]).replace('"', '""')
99                         if column["data"].split(" ")[1] in ("vat-history",
# replace() here is the legacy Python 2 string.replace function imported at
# the top of the file; it strips " |br| " markers and wraps the text in
# |prein| / |preout| rst markers.  NOTE(review): the maxsplit argument and
# the closing of this call (original line 102) are not visible.
101                             col_data = replace(col_data, " |br| ", "",
103                             col_data = " |prein| {0} |preout| ".\
104                                        format(col_data[:-5])
105                         row_lst.append('"{0}"'.format(col_data))
107                         row_lst.append("No data")
108             table_lst.append(row_lst)
110         # Write the data to file
112             file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
113                                             table["output-file-ext"])
114             logging.info("      Writing file: '{}'".format(file_name))
115             with open(file_name, "w") as file_handler:
116                 file_handler.write(",".join(header) + "\n")
117                 for item in table_lst:
118                     file_handler.write(",".join(item) + "\n")
120     logging.info("  Done.")
123 def table_merged_details(table, input_data):
124     """Generate the table(s) with algorithm: table_merged_details
125     specified in the specification file.
127     :param table: Table to generate.
128     :param input_data: Data to process.
129     :type table: pandas.Series
130     :type input_data: InputData
133     logging.info("  Generating the table {0} ...".
134                  format(table.get("title", "")))
# Unlike table_details above, this variant merges data across all jobs and
# builds before rendering, so there is no single (job, build) pair.
137     logging.info("    Creating the data set for the {0} '{1}'.".
138                  format(table.get("type", ""), table.get("title", "")))
139     data = input_data.filter_data(table)
140     data = input_data.merge_data(data)
141     data.sort_index(inplace=True)
143     logging.info("    Creating the data set for the {0} '{1}'.".
144                  format(table.get("type", ""), table.get("title", "")))
145     suites = input_data.filter_data(table, data_set="suites")
146     suites = input_data.merge_data(suites)
148     # Prepare the header of the tables
# NOTE(review): sampled excerpt -- the header list initializer (original
# line 149) is not visible here.  Titles are CSV-quoted with doubled quotes.
150     for column in table["columns"]:
151         header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
# One output file per suite; rows are merged tests whose "parent" matches.
153     for _, suite in suites.iteritems():
155         suite_name = suite["name"]
157         for test in data.keys():
158             if data[test]["parent"] in suite_name:
160                 for column in table["columns"]:
# column["data"] is "<source> <key>"; the second token selects the field.
162                         col_data = str(data[test][column["data"].
163                                        split(" ")[1]]).replace('"', '""')
164                         if column["data"].split(" ")[1] in ("vat-history",
# Legacy Python 2 string.replace; strips " |br| " markers and wraps the
# value in |prein| / |preout| rst markers.  NOTE(review): the closing of
# this call (original line 167) is not visible.
166                             col_data = replace(col_data, " |br| ", "",
168                             col_data = " |prein| {0} |preout| ".\
169                                        format(col_data[:-5])
170                         row_lst.append('"{0}"'.format(col_data))
172                         row_lst.append("No data")
173             table_lst.append(row_lst)
175         # Write the data to file
177             file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
178                                             table["output-file-ext"])
179             logging.info("      Writing file: '{}'".format(file_name))
180             with open(file_name, "w") as file_handler:
181                 file_handler.write(",".join(header) + "\n")
182                 for item in table_lst:
183                     file_handler.write(",".join(item) + "\n")
185     logging.info("  Done.")
188 def table_performance_comparison(table, input_data):
189     """Generate the table(s) with algorithm: table_performance_comparison
190     specified in the specification file.
192     :param table: Table to generate.
193     :param input_data: Data to process.
194     :type table: pandas.Series
195     :type input_data: InputData
198     logging.info("  Generating the table {0} ...".
199                  format(table.get("title", "")))
202     logging.info("    Creating the data set for the {0} '{1}'.".
203                  format(table.get("type", ""), table.get("title", "")))
204     data = input_data.filter_data(table, continue_on_error=True)
206     # Prepare the header of the tables
# NOTE(review): sampled excerpt -- the try: opening this header block and
# several interior lines are not visible.
208         header = ["Test case", ]
# MRR tables report "Receive Rate"; NDR/PDR tables report "Throughput".
210         if table["include-tests"] == "MRR":
211             hdr_param = "Receive Rate"
213             hdr_param = "Throughput"
# Optional "history" entries each add a mean + stdev column pair before the
# reference/compare pairs.
215         history = table.get("history", None)
219                     ["{0} {1} [Mpps]".format(item["title"], hdr_param),
220                      "{0} Stdev [Mpps]".format(item["title"])])
222             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
223              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
224              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
225              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
227         header_str = ",".join(header) + "\n"
228     except (AttributeError, KeyError) as err:
229         logging.error("The model is invalid, missing parameter: {0}".
233     # Prepare data to the table:
# Pass 1: collect "reference" measurements, keyed by a normalized test name.
235     for job, builds in table["reference"]["data"].items():
237             for tst_name, tst_data in data[job][str(build)].iteritems():
# Normalize the test name: drop the rate-search suffix (-ndrpdr, -pdrdisc,
# ...) and collapse thread/core tags (e.g. 2t1c and 1t1c both become 1c) so
# the same test matches across test types and thread configurations.
238                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
239                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
240                     replace("-ndrdisc", "").replace("-pdr", "").\
241                     replace("-ndr", "").\
242                     replace("1t1c", "1c").replace("2t1c", "1c").\
243                     replace("2t2c", "2c").replace("4t2c", "2c").\
244                     replace("4t4c", "4c").replace("8t4c", "4c")
# Cross-topology tables additionally drop the "2n1l-" topology prefix.
245                 if "across topologies" in table["title"].lower():
246                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
247                 if tbl_dict.get(tst_name_mod, None) is None:
# The display name is prefixed with the NIC extracted from the parent suite.
248                     groups = re.search(REGEX_NIC, tst_data["parent"])
249                     nic = groups.group(0) if groups else ""
250                     name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
252                     if "across testbeds" in table["title"].lower() or \
253                        "across topologies" in table["title"].lower():
255                             replace("1t1c", "1c").replace("2t1c", "1c").\
256                             replace("2t2c", "2c").replace("4t2c", "2c").\
257                             replace("4t4c", "4c").replace("8t4c", "4c")
258                     tbl_dict[tst_name_mod] = {"name": name,
# Pick the result field matching the configured test kind.  NDRPDR tests
# store throughput under ["throughput"][<NDR|PDR>]["LOWER"]; legacy NDR/PDR
# tests store it under ["throughput"]["value"].
262                     # TODO: Re-work when NDRPDRDISC tests are not used
263                     if table["include-tests"] == "MRR":
264                         tbl_dict[tst_name_mod]["ref-data"]. \
265                             append(tst_data["result"]["receive-rate"].avg)
266                     elif table["include-tests"] == "PDR":
267                         if tst_data["type"] == "PDR":
268                             tbl_dict[tst_name_mod]["ref-data"]. \
269                                 append(tst_data["throughput"]["value"])
270                         elif tst_data["type"] == "NDRPDR":
271                             tbl_dict[tst_name_mod]["ref-data"].append(
272                                 tst_data["throughput"]["PDR"]["LOWER"])
273                     elif table["include-tests"] == "NDR":
274                         if tst_data["type"] == "NDR":
275                             tbl_dict[tst_name_mod]["ref-data"]. \
276                                 append(tst_data["throughput"]["value"])
277                         elif tst_data["type"] == "NDRPDR":
278                             tbl_dict[tst_name_mod]["ref-data"].append(
279                                 tst_data["throughput"]["NDR"]["LOWER"])
283                     pass  # No data in output.xml for this test
# Pass 2: collect "compare" measurements with the same name normalization.
285     for job, builds in table["compare"]["data"].items():
287             for tst_name, tst_data in data[job][str(build)].iteritems():
288                 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
289                     replace("-ndrpdr", "").replace("-pdrdisc", ""). \
290                     replace("-ndrdisc", "").replace("-pdr", ""). \
291                     replace("-ndr", "").\
292                     replace("1t1c", "1c").replace("2t1c", "1c").\
293                     replace("2t2c", "2c").replace("4t2c", "2c").\
294                     replace("4t4c", "4c").replace("8t4c", "4c")
295                 if "across topologies" in table["title"].lower():
296                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
298                 # TODO: Re-work when NDRPDRDISC tests are not used
299                 if table["include-tests"] == "MRR":
300                     tbl_dict[tst_name_mod]["cmp-data"]. \
301                         append(tst_data["result"]["receive-rate"].avg)
302                 elif table["include-tests"] == "PDR":
303                     if tst_data["type"] == "PDR":
304                         tbl_dict[tst_name_mod]["cmp-data"]. \
305                             append(tst_data["throughput"]["value"])
306                     elif tst_data["type"] == "NDRPDR":
307                         tbl_dict[tst_name_mod]["cmp-data"].append(
308                             tst_data["throughput"]["PDR"]["LOWER"])
309                 elif table["include-tests"] == "NDR":
310                     if tst_data["type"] == "NDR":
311                         tbl_dict[tst_name_mod]["cmp-data"]. \
312                             append(tst_data["throughput"]["value"])
313                     elif tst_data["type"] == "NDRPDR":
314                         tbl_dict[tst_name_mod]["cmp-data"].append(
315                             tst_data["throughput"]["NDR"]["LOWER"])
# NOTE(review): the except clause preceding this pop() (original lines
# 316-320) is not visible; a test with unusable compare data is dropped
# from the table entirely.
321                     tbl_dict.pop(tst_name_mod, None)
# Pass 3 (optional): collect historical measurements per history item.
324         for job, builds in item["data"].items():
326                 for tst_name, tst_data in data[job][str(build)].iteritems():
327                     tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
328                         replace("-ndrpdr", "").replace("-pdrdisc", ""). \
329                         replace("-ndrdisc", "").replace("-pdr", ""). \
330                         replace("-ndr", "").\
331                         replace("1t1c", "1c").replace("2t1c", "1c").\
332                         replace("2t2c", "2c").replace("4t2c", "2c").\
333                         replace("4t4c", "4c").replace("8t4c", "4c")
334                     if "across topologies" in table["title"].lower():
335                         tst_name_mod = tst_name_mod.replace("2n1l-", "")
336                     if tbl_dict.get(tst_name_mod, None) is None:
# History samples are grouped per history item title, in insertion order.
338                     if tbl_dict[tst_name_mod].get("history", None) is None:
339                         tbl_dict[tst_name_mod]["history"] = OrderedDict()
340                     if tbl_dict[tst_name_mod]["history"].get(item["title"],
342                         tbl_dict[tst_name_mod]["history"][item["title"]] = \
345                         # TODO: Re-work when NDRPDRDISC tests are not used
346                         if table["include-tests"] == "MRR":
347                             tbl_dict[tst_name_mod]["history"][item["title"
348                                 ]].append(tst_data["result"]["receive-rate"].
350                         elif table["include-tests"] == "PDR":
351                             if tst_data["type"] == "PDR":
352                                 tbl_dict[tst_name_mod]["history"][
354                                     append(tst_data["throughput"]["value"])
355                             elif tst_data["type"] == "NDRPDR":
356                                 tbl_dict[tst_name_mod]["history"][item[
357                                     "title"]].append(tst_data["throughput"][
359                         elif table["include-tests"] == "NDR":
360                             if tst_data["type"] == "NDR":
361                                 tbl_dict[tst_name_mod]["history"][
363                                     append(tst_data["throughput"]["value"])
364                             elif tst_data["type"] == "NDRPDR":
365                                 tbl_dict[tst_name_mod]["history"][item[
366                                     "title"]].append(tst_data["throughput"][
370                     except (TypeError, KeyError):
# Build the output rows: name, then [mean, stdev] pairs (history, reference,
# compare) scaled from pps to Mpps, then the relative change in percent.
374     for tst_name in tbl_dict.keys():
375         item = [tbl_dict[tst_name]["name"], ]
377         if tbl_dict[tst_name].get("history", None) is not None:
378             for hist_data in tbl_dict[tst_name]["history"].values():
380                     item.append(round(mean(hist_data) / 1000000, 2))
381                     item.append(round(stdev(hist_data) / 1000000, 2))
383                     item.extend([None, None])
385             item.extend([None, None])
386         data_t = tbl_dict[tst_name]["ref-data"]
388             item.append(round(mean(data_t) / 1000000, 2))
389             item.append(round(stdev(data_t) / 1000000, 2))
391             item.extend([None, None])
392         data_t = tbl_dict[tst_name]["cmp-data"]
394             item.append(round(mean(data_t) / 1000000, 2))
395             item.append(round(stdev(data_t) / 1000000, 2))
397             item.extend([None, None])
# item[-4] is the reference mean, item[-2] the compare mean; the relative
# change is only computed when both exist and division is safe.
398         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
399             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
# Rows missing any column (len mismatch) are silently dropped.
400         if len(item) == len(header):
403     # Sort the table according to the relative change
404     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
406     # Generate csv tables:
407     csv_file = "{0}.csv".format(table["output-file"])
408     with open(csv_file, "w") as file_handler:
409         file_handler.write(header_str)
411             file_handler.write(",".join([str(item) for item in test]) + "\n")
413     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
416 def table_nics_comparison(table, input_data):
417     """Generate the table(s) with algorithm: table_nics_comparison
418     specified in the specification file.
420     :param table: Table to generate.
421     :param input_data: Data to process.
422     :type table: pandas.Series
423     :type input_data: InputData
426     logging.info("  Generating the table {0} ...".
427                  format(table.get("title", "")))
430     logging.info("    Creating the data set for the {0} '{1}'.".
431                  format(table.get("type", ""), table.get("title", "")))
432     data = input_data.filter_data(table, continue_on_error=True)
434     # Prepare the header of the tables
# NOTE(review): sampled excerpt -- the try: opening this header block and
# several interior lines are not visible.
436         header = ["Test case", ]
438         if table["include-tests"] == "MRR":
439             hdr_param = "Receive Rate"
441             hdr_param = "Throughput"
444             ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
445              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
446              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
447              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
449         header_str = ",".join(header) + "\n"
450     except (AttributeError, KeyError) as err:
451         logging.error("The model is invalid, missing parameter: {0}".
455     # Prepare data to the table:
457     for job, builds in table["data"].items():
459             for tst_name, tst_data in data[job][str(build)].iteritems():
# Normalize the test name as in table_performance_comparison, then also
# strip the NIC token itself so the same test on different NICs shares one
# key -- results are split into ref/cmp buckets by NIC tag instead.
460                 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
461                     replace("-ndrpdr", "").replace("-pdrdisc", "").\
462                     replace("-ndrdisc", "").replace("-pdr", "").\
463                     replace("-ndr", "").\
464                     replace("1t1c", "1c").replace("2t1c", "1c").\
465                     replace("2t2c", "2c").replace("4t2c", "2c").\
466                     replace("4t4c", "4c").replace("8t4c", "4c")
467                 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
468                 if tbl_dict.get(tst_name_mod, None) is None:
469                     name = "-".join(tst_data["name"].split("-")[:-1])
470                     tbl_dict[tst_name_mod] = {"name": name,
# Select the measured value for the configured test kind.
474                     if table["include-tests"] == "MRR":
475                         result = tst_data["result"]["receive-rate"].avg
476                     elif table["include-tests"] == "PDR":
477                         result = tst_data["throughput"]["PDR"]["LOWER"]
478                     elif table["include-tests"] == "NDR":
479                         result = tst_data["throughput"]["NDR"]["LOWER"]
# Route the sample to the reference or compare bucket by NIC tag.
# NOTE(review): the guard between these lines (original lines 480-483) is
# not visible here.
484                     if table["reference"]["nic"] in tst_data["tags"]:
485                         tbl_dict[tst_name_mod]["ref-data"].append(result)
486                     elif table["compare"]["nic"] in tst_data["tags"]:
487                         tbl_dict[tst_name_mod]["cmp-data"].append(result)
488                 except (TypeError, KeyError) as err:
489                     logging.debug("No data for {0}".format(tst_name))
490                     logging.debug(repr(err))
491                     # No data in output.xml for this test
# Build rows: name, ref mean/stdev, cmp mean/stdev (pps -> Mpps), then the
# relative change in percent; rows with missing columns are dropped.
494     for tst_name in tbl_dict.keys():
495         item = [tbl_dict[tst_name]["name"], ]
496         data_t = tbl_dict[tst_name]["ref-data"]
498             item.append(round(mean(data_t) / 1000000, 2))
499             item.append(round(stdev(data_t) / 1000000, 2))
501             item.extend([None, None])
502         data_t = tbl_dict[tst_name]["cmp-data"]
504             item.append(round(mean(data_t) / 1000000, 2))
505             item.append(round(stdev(data_t) / 1000000, 2))
507             item.extend([None, None])
508         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
509             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
510         if len(item) == len(header):
513     # Sort the table according to the relative change
514     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
516     # Generate csv tables:
517     csv_file = "{0}.csv".format(table["output-file"])
518     with open(csv_file, "w") as file_handler:
519         file_handler.write(header_str)
521             file_handler.write(",".join([str(item) for item in test]) + "\n")
523     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
526 def table_performance_trending_dashboard(table, input_data):
527     """Generate the table(s) with algorithm:
528     table_performance_trending_dashboard
529     specified in the specification file.
531     :param table: Table to generate.
532     :param input_data: Data to process.
533     :type table: pandas.Series
534     :type input_data: InputData
537     logging.info("  Generating the table {0} ...".
538                  format(table.get("title", "")))
541     logging.info("    Creating the data set for the {0} '{1}'.".
542                  format(table.get("type", ""), table.get("title", "")))
543     data = input_data.filter_data(table, continue_on_error=True)
545     # Prepare the header of the tables
546     header = ["Test Case",
548               "Short-Term Change [%]",
549               "Long-Term Change [%]",
553     header_str = ",".join(header) + "\n"
555     # Prepare data to the table:
# Collect per-test receive-rate samples across builds, keyed by test name.
557     for job, builds in table["data"].items():
559             for tst_name, tst_data in data[job][str(build)].iteritems():
560                 if tst_name.lower() in table["ignore-list"]:
562                 if tbl_dict.get(tst_name, None) is None:
# Display name is prefixed with the NIC parsed from the parent suite name.
# NOTE(review): sampled excerpt -- the guard between these lines (original
# lines 564-565) is not visible.
563                     groups = re.search(REGEX_NIC, tst_data["parent"])
566                     nic = groups.group(0)
567                     tbl_dict[tst_name] = {
568                         "name": "{0}-{1}".format(nic, tst_data["name"]),
569                         "data": OrderedDict()}
571                     tbl_dict[tst_name]["data"][str(build)] = \
572                         tst_data["result"]["receive-rate"]
573                 except (TypeError, KeyError):
574                     pass  # No data in output.xml for this test
# Classify each test's time series and compute short/long-term trend deltas.
577     for tst_name in tbl_dict.keys():
578         data_t = tbl_dict[tst_name]["data"]
582         classification_lst, avgs = classify_anomalies(data_t)
# Window sizes are capped by the number of available samples.
584         win_size = min(len(data_t), table["window"])
585         long_win_size = min(len(data_t), table["long-trend-window"])
589                 [x for x in avgs[-long_win_size:-win_size]
594         avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Short-term change: last trend average vs. the average one window ago;
# nan guards avoid division by zero / undefined trends.
596         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
597             rel_change_last = nan
599             rel_change_last = round(
600                 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
# Long-term change: last trend average vs. the long-window maximum.
602         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
603             rel_change_long = nan
605             rel_change_long = round(
606                 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
# Tests with no usable classification or all-nan changes are skipped.
608         if classification_lst:
609             if isnan(rel_change_last) and isnan(rel_change_long):
611             if (isnan(last_avg) or
612                     isnan(rel_change_last) or
613                     isnan(rel_change_long)):
616                 [tbl_dict[tst_name]["name"],
617                  round(last_avg / 1000000, 2),
620                  classification_lst[-win_size:].count("regression"),
621                  classification_lst[-win_size:].count("progression")])
623     tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-key ordering: most regressions first, then most progressions,
# then by short-term change within each group.
626     for nrr in range(table["window"], -1, -1):
627         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
628         for nrp in range(table["window"], -1, -1):
629             tbl_out = [item for item in tbl_reg if item[5] == nrp]
630             tbl_out.sort(key=lambda rel: rel[2])
631             tbl_sorted.extend(tbl_out)
633     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
635     logging.info("    Writing file: '{0}'".format(file_name))
636     with open(file_name, "w") as file_handler:
637         file_handler.write(header_str)
638         for test in tbl_sorted:
639             file_handler.write(",".join([str(item) for item in test]) + '\n')
641     txt_file_name = "{0}.txt".format(table["output-file"])
642     logging.info("    Writing file: '{0}'".format(txt_file_name))
643     convert_csv_to_pretty_txt(file_name, txt_file_name)
646 def _generate_url(base, testbed, test_name):
647     """Generate URL to a trending plot from the name of the test case.
649     :param base: The base part of URL common to all test cases.
650     :param testbed: The testbed used for testing.
651     :param test_name: The name of the test case.
655     :returns: The URL to the plot with the trending data for the given test
# The URL is assembled from substrings of the test name: a page (file_name),
# a feature suffix, a NIC token, a frame size and a core-count anchor.
# NOTE(review): sampled excerpt -- the initializers for file_name / feature /
# nic / framesize / anchor and several assignment lines inside the elif arms
# are not visible here.
# Page selection by traffic/feature keywords in the test name:
665     if "lbdpdk" in test_name or "lbvpp" in test_name:
666         file_name = "link_bonding"
668     elif "114b" in test_name and "vhost" in test_name:
671     elif "testpmd" in test_name or "l3fwd" in test_name:
674     elif "memif" in test_name:
675         file_name = "container_memif"
678     elif "srv6" in test_name:
681     elif "vhost" in test_name:
682         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
683             file_name = "vm_vhost_l2"
684             if "114b" in test_name:
686             elif "l2xcbase" in test_name and "x520" in test_name:
687                 feature = "-base-l2xc"
688             elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
689                 feature = "-base-l2bd"
692         elif "ip4base" in test_name:
693             file_name = "vm_vhost_ip4"
696     elif "ipsec" in test_name:
698         feature = "-base-scale"
700     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
701         file_name = "ip4_tunnels"
704     elif "ip4base" in test_name or "ip4scale" in test_name:
706         if "xl710" in test_name:
707             feature = "-base-scale-features"
708         elif "iacl" in test_name:
709             feature = "-features-iacl"
710         elif "oacl" in test_name:
711             feature = "-features-oacl"
712         elif "snat" in test_name or "cop" in test_name:
713             feature = "-features"
715             feature = "-base-scale"
717     elif "ip6base" in test_name or "ip6scale" in test_name:
719         feature = "-base-scale"
721     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
722             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
723             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
725         if "macip" in test_name:
726             feature = "-features-macip"
727         elif "iacl" in test_name:
728             feature = "-features-iacl"
729         elif "oacl" in test_name:
730             feature = "-features-oacl"
732             feature = "-base-scale"
# NIC token from the hardware model substring:
734     if "x520" in test_name:
736     elif "x710" in test_name:
738     elif "xl710" in test_name:
740     elif "xxv710" in test_name:
742     elif "vic1227" in test_name:
744     elif "vic1385" in test_name:
# Frame size token:
750     if "64b" in test_name:
752     elif "78b" in test_name:
754     elif "imix" in test_name:
756     elif "9000b" in test_name:
758     elif "1518b" in test_name:
760     elif "114b" in test_name:
764     anchor += framesize + '-'
# Core-count fragment appended to the anchor:
766     if "1t1c" in test_name:
768     elif "2t2c" in test_name:
770     elif "4t4c" in test_name:
772     elif "2t1c" in test_name:
774     elif "4t2c" in test_name:
776     elif "8t4c" in test_name:
779     return url + file_name + '-' + testbed + '-' + nic + framesize + feature + \
783 def table_performance_trending_dashboard_html(table, input_data):
784     """Generate the table(s) with algorithm:
785     table_performance_trending_dashboard_html specified in the specification
788     :param table: Table to generate.
789     :param input_data: Data to process.
791     :type input_data: InputData
# The testbed name is required to build per-test trending-plot URLs.
794     testbed = table.get("testbed", None)
796         logging.error("The testbed is not defined for the table '{0}'.".
797                       format(table.get("title", "")))
800     logging.info("  Generating the table {0} ...".
801                  format(table.get("title", "")))
# Read the previously generated dashboard CSV into a list of rows.
# NOTE(review): sampled excerpt -- the try: opening this block (original
# lines 802-803) is not visible.  'rb' mode is the Python 2 csv idiom.
804         with open(table["input-file"], 'rb') as csv_file:
805             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
806             csv_lst = [item for item in csv_content]
808         logging.warning("The input file is not defined.")
810     except csv.Error as err:
811         logging.warning("Not possible to process the file '{0}'.\n{1}".
812                         format(table["input-file"], err))
# Build the HTML table with ElementTree; row 0 of the CSV is the header.
816     dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
819     tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
820     for idx, item in enumerate(csv_lst[0]):
821         alignment = "left" if idx == 0 else "center"
822         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Alternating row backgrounds per classification (regression / progression /
# normal); each entry is (even-row color, odd-row color).
826     colors = {"regression": ("#ffcccc", "#ff9999"),
827               "progression": ("#c6ecc6", "#9fdf9f"),
828               "normal": ("#e9f1fb", "#d4e4f7")}
829     for r_idx, row in enumerate(csv_lst[1:]):
# NOTE(review): the classification logic choosing "regression"/"normal"
# (original lines 830-835) is not visible here.
833             color = "progression"
836         background = colors[color][r_idx % 2]
837         tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
840         for c_idx, item in enumerate(row):
841             alignment = "left" if c_idx == 0 else "center"
842             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# The first cell (test name) becomes a link to its trending plot.
845                 url = _generate_url("../trending/", testbed, item)
846                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Emit the table as an rst "raw html" directive.
851         with open(table["output-file"], 'w') as html_file:
852             logging.info("    Writing file: '{0}'".format(table["output-file"]))
853             html_file.write(".. raw:: html\n\n\t")
854             html_file.write(ET.tostring(dashboard))
855             html_file.write("\n\t<p><br><br></p>\n")
857         logging.warning("The output file is not defined.")
861 def table_failed_tests(table, input_data):
862     """Generate the table(s) with algorithm: table_failed_tests
863     specified in the specification file.
865     :param table: Table to generate.
866     :param input_data: Data to process.
867     :type table: pandas.Series
868     :type input_data: InputData
871     logging.info("  Generating the table {0} ...".
872                  format(table.get("title", "")))
875     logging.info("    Creating the data set for the {0} '{1}'.".
876                  format(table.get("type", ""), table.get("title", "")))
877     data = input_data.filter_data(table, continue_on_error=True)
879     # Prepare the header of the tables
880     header = ["Test Case",
882               "Last Failure [Time]",
883               "Last Failure [VPP-Build-Id]",
884               "Last Failure [CSIT-Job-Build-Id]"]
886     # Generate the data for the table according to the model in the table
# Only builds generated within the configured window (default 7 days) are
# counted.  NOTE(review): sampled excerpt -- the "now" initializer (original
# lines 887-889) is not visible here.
890     timeperiod = timedelta(int(table.get("window", 7)))
893     for job, builds in table["data"].items():
896             for tst_name, tst_data in data[job][build].iteritems():
897                 if tst_name.lower() in table["ignore-list"]:
899                 if tbl_dict.get(tst_name, None) is None:
# Display name is prefixed with the NIC parsed from the parent suite name.
# NOTE(review): the guard between these lines (original lines 901-902) is
# not visible.
900                     groups = re.search(REGEX_NIC, tst_data["parent"])
903                     nic = groups.group(0)
904                     tbl_dict[tst_name] = {
905                         "name": "{0}-{1}".format(nic, tst_data["name"]),
906                         "data": OrderedDict()}
# Build generation timestamp, parsed as "YYYYMMDD HH:MM"; builds older than
# the window are skipped.  NOTE(review): assumed naive timestamps in a
# single timezone -- confirm against input_data.metadata.
908                     generated = input_data.metadata(job, build).\
912                     then = dt.strptime(generated, "%Y%m%d %H:%M")
913                     if (now - then) <= timeperiod:
914                         tbl_dict[tst_name]["data"][build] = (
917                             input_data.metadata(job, build).get("version", ""),
919                 except (TypeError, KeyError) as err:
920                     logging.warning("tst_name: {} - err: {}".
921                                     format(tst_name, repr(err)))
# Aggregate per test: count failures in the window and remember the most
# recent failure's date, VPP build and CSIT build.
925     for tst_data in tbl_dict.values():
927         for val in tst_data["data"].values():
# NOTE(review): the status check (original lines 928-929) is not visible;
# these assignments presumably run only for failed builds.
930                 fails_last_date = val[1]
931                 fails_last_vpp = val[2]
932                 fails_last_csit = val[3]
934         max_fails = fails_nr if fails_nr > max_fails else max_fails
935         tbl_lst.append([tst_data["name"],
939                         "mrr-daily-build-{0}".format(fails_last_csit)])
941     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
# Order rows by failure count, highest first, preserving the date sort
# within each count bucket.
943     for nrf in range(max_fails, -1, -1):
944         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
945         tbl_sorted.extend(tbl_fails)
946     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
948     logging.info("    Writing file: '{0}'".format(file_name))
949     with open(file_name, "w") as file_handler:
950         file_handler.write(",".join(header) + "\n")
951         for test in tbl_sorted:
952             file_handler.write(",".join([str(item) for item in test]) + '\n')
954     txt_file_name = "{0}.txt".format(table["output-file"])
955     logging.info("    Writing file: '{0}'".format(txt_file_name))
956     convert_csv_to_pretty_txt(file_name, txt_file_name)
959 def table_failed_tests_html(table, input_data):
960     """Generate the table(s) with algorithm: table_failed_tests_html
961     specified in the specification file.
963     :param table: Table to generate.
964     :param input_data: Data to process.
965     :type table: pandas.Series
966     :type input_data: InputData
# The testbed name is required to build per-test trending-plot URLs.
969     testbed = table.get("testbed", None)
971         logging.error("The testbed is not defined for the table '{0}'.".
972                       format(table.get("title", "")))
975     logging.info("  Generating the table {0} ...".
976                  format(table.get("title", "")))
# Read the failed-tests CSV produced by table_failed_tests.
# NOTE(review): sampled excerpt -- the try: opening this block (original
# lines 977-978) is not visible.  'rb' mode is the Python 2 csv idiom.
979         with open(table["input-file"], 'rb') as csv_file:
980             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
981             csv_lst = [item for item in csv_content]
983         logging.warning("The input file is not defined.")
985     except csv.Error as err:
986         logging.warning("Not possible to process the file '{0}'.\n{1}".
987                         format(table["input-file"], err))
# Build the HTML table with ElementTree; row 0 of the CSV is the header.
991     failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
994     tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
995     for idx, item in enumerate(csv_lst[0]):
996         alignment = "left" if idx == 0 else "center"
997         th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
# Simple two-color zebra striping (no per-classification coloring here).
1001     colors = ("#e9f1fb", "#d4e4f7")
1002     for r_idx, row in enumerate(csv_lst[1:]):
1003         background = colors[r_idx % 2]
1004         tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
1007         for c_idx, item in enumerate(row):
1008             alignment = "left" if c_idx == 0 else "center"
1009             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# The first cell (test name) becomes a link to its trending plot.
1012                 url = _generate_url("../trending/", testbed, item)
1013                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
# Emit the table as an rst "raw html" directive.
1018         with open(table["output-file"], 'w') as html_file:
1019             logging.info("    Writing file: '{0}'".format(table["output-file"]))
1020             html_file.write(".. raw:: html\n\n\t")
1021             html_file.write(ET.tostring(failed_tests))
1022             html_file.write("\n\t<p><br><br></p>\n")
1024         logging.warning("The output file is not defined.")