# NOTE(review): this is the TAIL of a nested chart-generating helper whose
# `def` line is outside this chunk (it returns the per-chart results dict
# consumed by the caller below).  The source arrived as a diff fragment with
# indentation stripped; indentation here is reconstructed from the syntax and
# the `+ ` diff prefixes are dropped — tokens are otherwise unchanged.

# Deep-copy the layout so per-chart additions (e.g. updatemenus below) do not
# mutate the shared specification object.
try:
    layout = deepcopy(graph["layout"])
except KeyError as err:
    logging.error("Finished with error: No layout defined")
    logging.error(repr(err))
    return

if groups:
    # Build one boolean visibility mask per group: show[i][k] is True iff
    # trace k belongs to group i.  `visibility` is assumed to be a list of
    # per-group trace lists built earlier in this helper — TODO confirm.
    show = list()
    for i in range(len(visibility)):
        visible = list()
        for r in range(len(visibility)):
            for _ in range(len(visibility[r])):
                visible.append(i == r)
        show.append(visible)

    # Dropdown buttons: "All" (every trace visible) first, then one button
    # per group that shows only that group's traces.
    buttons = list()
    buttons.append(dict(
        label="All",
        method="update",
        args=[{"visible": [True for _ in range(len(show[0]))]}, ]
    ))
    for i in range(len(groups)):
        try:
            label = graph["group-names"][i]
        except (IndexError, KeyError):
            # No configured name for this group — generate a 1-based one.
            label = "Group {num}".format(num=i + 1)
        buttons.append(dict(
            label=label,
            method="update",
            args=[{"visible": show[i]}, ]
        ))

    # Attach the group-selection dropdown to the left of the plot area.
    layout['updatemenus'] = list([
        dict(
            active=0,
            type="dropdown",
            direction="down",
            xanchor="left",
            yanchor="bottom",
            x=-0.12,
            y=1.0,
            buttons=buttons
        )
    ])

# Output path: <output-file>-<chart name><extension>, all taken from the spec.
name_file = "{0}-{1}{2}".format(spec.cpta["output-file"],
                                graph["output-file-name"],
                                spec.cpta["output-file-type"])

logs.append(("INFO", "    Writing the file '{0}' ...".
             format(name_file)))
# Render the figure to a standalone HTML file; a plot with no data is a
# warning, not an error (the chart is simply skipped).
plpl = plgo.Figure(data=traces, layout=layout)
try:
    ploff.plot(plpl, show_link=False, auto_open=False,
               filename=name_file)
except plerr.PlotlyEmptyDataError:
    logs.append(("WARNING", "No data for the plot. Skipped."))

# Flush the buffered (level, message) log records collected during chart
# generation — presumably buffered so multiprocess output stays ordered;
# TODO confirm against the caller.
for level, line in logs:
    if level == "INFO":
        logging.info(line)
    elif level == "ERROR":
        logging.error(line)
    elif level == "DEBUG":
        logging.debug(line)
    elif level == "CRITICAL":
        logging.critical(line)
    elif level == "WARNING":
        logging.warning(line)

# Hand the per-chart artefacts back to the caller: the job this chart covers,
# its CSV table rows, and the anomaly-classification results.
return {"job_name": job_name, "csv_table": csv_tbl, "results": res}
+
# NOTE(review): interior of the enclosing trend-plot generation function —
# its `def` line and its tail are outside this chunk, and `spec` /
# `input_data` are presumably its parameters (TODO confirm).  Indentation is
# reconstructed; the stray `+ ` diff prefixes are dropped.

# Collect, per job, the build numbers that are actually usable: builds whose
# status is "failed", "not found" or "removed" are excluded.
builds_dict = dict()
for job in spec.input["builds"].keys():
    if builds_dict.get(job, None) is None:
        builds_dict[job] = list()
    for build in spec.input["builds"][job]:
        status = build["status"]
        if status != "failed" and status != "not found" and \
                status != "removed":
            builds_dict[job].append(str(build["build"]))

# Create "build ID": "date" dict:
# For each usable build, record (generated date, version, testbed name).
# The testbed name is resolved from the build's testbed IP through the
# spec.environment "testbeds" lookup table, when both are available.
build_info = dict()
tb_tbl = spec.environment.get("testbeds", None)
for job_name, job_data in builds_dict.items():
    if build_info.get(job_name, None) is None:
        # OrderedDict keeps builds in insertion order so the header columns
        # below line up with the build-number order.
        build_info[job_name] = OrderedDict()
    for build in job_data:
        testbed = ""
        tb_ip = input_data.metadata(job_name, build).get("testbed", "")
        if tb_ip and tb_tbl:
            testbed = tb_tbl.get(tb_ip, "")
        build_info[job_name][build] = (
            input_data.metadata(job_name, build).get("generated", ""),
            input_data.metadata(job_name, build).get("version", ""),
            testbed
        )

# Collects the per-test anomaly classifications produced by each chart.
anomaly_classifications = list()

# Create the header:
# Three CSV header rows per job: build numbers, build dates, versions —
# columns ordered identically thanks to the OrderedDict above.
csv_tables = dict()
for job_name in builds_dict.keys():
    if csv_tables.get(job_name, None) is None:
        csv_tables[job_name] = list()
    header = "Build Number:," + ",".join(builds_dict[job_name]) + '\n'
    csv_tables[job_name].append(header)
    build_dates = [x[0] for x in build_info[job_name].values()]
    header = "Build Date:," + ",".join(build_dates) + '\n'
    csv_tables[job_name].append(header)
    versions = [x[1] for x in build_info[job_name].values()]
    header = "Version:," + ",".join(versions) + '\n'
    csv_tables[job_name].append(header)

# Generate every configured trend chart; each result carries the keys
# returned by the nested helper above: "job_name", "csv_table", "results".
for chart in spec.cpta["plots"]:
    result = _generate_chart(chart)

    anomaly_classifications.extend(result["results"])
    csv_tables[result["job_name"]].extend(result["csv_table"])