+ # NOTE(review): tail of a nested chart-generation helper (presumably
+ # _generate_chart, invoked near the bottom of this hunk). Its `def` line
+ # and the origin of `graph`, `groups`, `visibility`, `traces`, `logs`,
+ # `job_name`, `csv_tbl` and `res` are outside this diff — confirm against
+ # the full file.
+ try:
+ # Deep-copy so per-chart layout mutations below do not alter the spec.
+ layout = deepcopy(graph["layout"])
+ except KeyError as err:
+ logging.error("Finished with error: No layout defined")
+ logging.error(repr(err))
+ return
+ if groups:
+ # Build one visibility mask per group: mask i is True only for the
+ # traces of group i (visibility[r] lists the traces of group r).
+ show = list()
+ for i in range(len(visibility)):
+ visible = list()
+ for r in range(len(visibility)):
+ for _ in range(len(visibility[r])):
+ visible.append(i == r)
+ show.append(visible)
+
+ # Drop-down buttons: "All" (everything visible) first, then one
+ # button per group applying that group's mask.
+ buttons = list()
+ buttons.append(dict(
+ label="All",
+ method="update",
+ args=[{"visible": [True for _ in range(len(show[0]))]}, ]
+ ))
+ for i in range(len(groups)):
+ try:
+ label = graph["group-names"][i]
+ except (IndexError, KeyError):
+ # Spec names fewer groups than exist; fall back to a
+ # generated 1-based label.
+ label = "Group {num}".format(num=i + 1)
+ buttons.append(dict(
+ label=label,
+ method="update",
+ args=[{"visible": show[i]}, ]
+ ))
+
+ # Attach the drop-down menu just left of the plot area.
+ layout['updatemenus'] = list([
+ dict(
+ active=0,
+ type="dropdown",
+ direction="down",
+ xanchor="left",
+ yanchor="bottom",
+ x=-0.12,
+ y=1.0,
+ buttons=buttons
+ )
+ ])
+
+ # Output path: <output-file>-<chart name><extension>, all from the spec.
+ name_file = "{0}-{1}{2}".format(spec.cpta["output-file"],
+ graph["output-file-name"],
+ spec.cpta["output-file-type"])
+
+ logs.append(("INFO", " Writing the file '{0}' ...".
+ format(name_file)))
+ plpl = plgo.Figure(data=traces, layout=layout)
+ try:
+ # Write the offline plot; an empty data set is only a warning.
+ ploff.plot(plpl, show_link=False, auto_open=False,
+ filename=name_file)
+ except plerr.PlotlyEmptyDataError:
+ logs.append(("WARNING", "No data for the plot. Skipped."))
+
+ # Replay the buffered log records at their original severity
+ # (presumably buffered to keep output ordered — TODO confirm why).
+ for level, line in logs:
+ if level == "INFO":
+ logging.info(line)
+ elif level == "ERROR":
+ logging.error(line)
+ elif level == "DEBUG":
+ logging.debug(line)
+ elif level == "CRITICAL":
+ logging.critical(line)
+ elif level == "WARNING":
+ logging.warning(line)
+
+ return {"job_name": job_name, "csv_table": csv_tbl, "results": res}
+
+ # Collect, per job, the build numbers that completed usefully — anything
+ # whose status is not "failed", "not found" or "removed".
+ builds_dict = dict()
+ for job in spec.input["builds"].keys():
+ if builds_dict.get(job, None) is None:
+ builds_dict[job] = list()
+ for build in spec.input["builds"][job]:
+ status = build["status"]
+ if status != "failed" and status != "not found" and \
+ status != "removed":
+ builds_dict[job].append(str(build["build"]))
+
+ # Create "build ID": "date" dict:
+ # (each value is actually a (generated-date, version, testbed) tuple;
+ # OrderedDict preserves the build order established above)
+ build_info = dict()
+ tb_tbl = spec.environment.get("testbeds", None)
+ for job_name, job_data in builds_dict.items():
+ if build_info.get(job_name, None) is None:
+ build_info[job_name] = OrderedDict()
+ for build in job_data:
+ testbed = ""
+ # Resolve the testbed IP from build metadata to a human-readable
+ # name via the spec's testbed table, when both are available.
+ tb_ip = input_data.metadata(job_name, build).get("testbed", "")
+ if tb_ip and tb_tbl:
+ testbed = tb_tbl.get(tb_ip, "")
+ build_info[job_name][build] = (
+ input_data.metadata(job_name, build).get("generated", ""),
+ input_data.metadata(job_name, build).get("version", ""),
+ testbed
+ )
+ # Maps job name -> {test name: anomaly classification}; filled from each
+ # chart's results below and evaluated at the end of this hunk.
+ anomaly_classifications = dict()
+
+ # Create the header:
+ # Three header rows per job: build numbers, build dates and versions,
+ # pulled from the (generated, version, testbed) tuples in build_info.
+ csv_tables = dict()
+ for job_name in builds_dict.keys():
+ if csv_tables.get(job_name, None) is None:
+ csv_tables[job_name] = list()
+ header = "Build Number:," + ",".join(builds_dict[job_name]) + '\n'
+ csv_tables[job_name].append(header)
+ build_dates = [x[0] for x in build_info[job_name].values()]
+ header = "Build Date:," + ",".join(build_dates) + '\n'
+ csv_tables[job_name].append(header)
+ versions = [x[1] for x in build_info[job_name].values()]
+ header = "Version:," + ",".join(versions) + '\n'
+ csv_tables[job_name].append(header)
+
+ # Generate each configured chart and merge its CSV rows and per-test
+ # classifications into the per-job accumulators.
+ for chart in spec.cpta["plots"]:
+ result = _generate_chart(chart)
+
+ csv_tables[result["job_name"]].extend(result["csv_table"])
+
+ if anomaly_classifications.get(result["job_name"], None) is None:
+ anomaly_classifications[result["job_name"]] = dict()
+ anomaly_classifications[result["job_name"]].update(result["results"])
+
+ # Write the tables:
+ # For every job: write the trending CSV, then re-read it to render a
+ # pretty-printed TXT variant with throughput scaled to Mpps.
+ for job_name, csv_table in csv_tables.items():
+ file_name = spec.cpta["output-file"] + "-" + job_name + "-trending"
+ with open("{0}.csv".format(file_name), 'w') as file_handler:
+ file_handler.writelines(csv_table)
+
+ txt_table = None
+ # NOTE(review): csv.reader on a file opened 'rb' is a Python 2 idiom;
+ # under Python 3 this raises — confirm the target interpreter.
+ with open("{0}.csv".format(file_name), 'rb') as csv_file:
+ csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
+ line_nr = 0
+ for row in csv_content:
+ if txt_table is None:
+ # First row (build numbers) becomes the table header.
+ txt_table = prettytable.PrettyTable(row)
+ else:
+ if line_nr > 1:
+ # Data rows (past dates/versions): convert numeric
+ # cells from pps to Mpps, two decimals; leave
+ # non-numeric cells untouched.
+ for idx, item in enumerate(row):
+ try:
+ row[idx] = str(round(float(item) / 1000000, 2))
+ except ValueError:
+ pass
+ try:
+ txt_table.add_row(row)
+ except Exception as err:
+ # Best-effort: a malformed row (e.g. wrong column
+ # count) is logged and skipped, not fatal.
+ logging.warning("Error occurred while generating TXT "
+ "table:\n{0}".format(err))
+ line_nr += 1
+ txt_table.align["Build Number:"] = "l"
+ with open("{0}.txt".format(file_name), "w") as txt_file:
+ txt_file.write(str(txt_table))
+
+ # Evaluate result:
+ # Write per-job regression and progression lists; overall result is FAIL
+ # if any test is classified as a regression or an outlier.
+ # NOTE(review): .iteritems() is Python 2 only — confirm the target
+ # interpreter (.items() would be the portable spelling).
+ if anomaly_classifications:
+ result = "PASS"
+ for job_name, job_data in anomaly_classifications.iteritems():
+ file_name = "{0}-regressions-{1}.txt".\
+ format(spec.cpta["output-file"], job_name)
+ with open(file_name, 'w') as txt_file:
+ for test_name, classification in job_data.iteritems():
+ if classification == "regression":
+ txt_file.write(test_name + '\n')
+ if classification == "regression" or \
+ classification == "outlier":
+ result = "FAIL"
+ file_name = "{0}-progressions-{1}.txt".\
+ format(spec.cpta["output-file"], job_name)
+ with open(file_name, 'w') as txt_file:
+ for test_name, classification in job_data.iteritems():
+ if classification == "progression":
+ txt_file.write(test_name + '\n')