X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=resources%2Ftools%2Fpresentation%2Fgenerator_CPTA.py;h=f57757f4511c8e8a480ff1e00e5af5e43fe25ac7;hb=d68951ac245150eeefa6e0f4156e4c1b5c9e9325;hp=1e7719153fd7b1d3cccb0310fe2201a52bbdff88;hpb=599386f5a2f1d63f6dac9f2a0be34f4ba486cf57;p=csit.git

diff --git a/resources/tools/presentation/generator_CPTA.py b/resources/tools/presentation/generator_CPTA.py
index 1e7719153f..f57757f451 100644
--- a/resources/tools/presentation/generator_CPTA.py
+++ b/resources/tools/presentation/generator_CPTA.py
@@ -14,8 +14,6 @@
 """Generation of Continuous Performance Trending and Analysis.
 """

-import multiprocessing
-import os
 import logging
 import csv
 import prettytable
@@ -27,8 +25,7 @@ from collections import OrderedDict
 from datetime import datetime
 from copy import deepcopy

-from utils import archive_input_data, execute_command, \
-    classify_anomalies, Worker
+from utils import archive_input_data, execute_command, classify_anomalies


 # Command to build the html format of the report
@@ -138,7 +135,8 @@ def generate_cpta(spec, data):
             css_file:
         css_file.write(THEME_OVERRIDES)

-    archive_input_data(spec)
+    if spec.configuration.get("archive-inputs", True):
+        archive_input_data(spec)

     logging.info("Done.")

@@ -323,21 +321,19 @@ def _generate_all_charts(spec, input_data):
     :type input_data: InputData
     """

-    def _generate_chart(_, data_q, graph):
+    def _generate_chart(graph):
         """Generates the chart.
         """

         logs = list()

-        logging.info(" Generating the chart '{0}' ...".
-                     format(graph.get("title", "")))
         logs.append(("INFO", " Generating the chart '{0}' ...".
                      format(graph.get("title", ""))))

         job_name = graph["data"].keys()[0]

         csv_tbl = list()
-        res = list()
+        res = dict()

         # Transform the data
         logs.append(("INFO", " Creating the data set for the {0} '{1}'.".
@@ -383,22 +379,22 @@ def _generate_all_charts(spec, input_data):
             for group in groups:
                 visible = list()
                 for tag in group:
-                    for test_name, test_data in chart_data.items():
+                    for tst_name, test_data in chart_data.items():
                         if not test_data:
                             logs.append(("WARNING",
                                          "No data for the test '{0}'".
-                                         format(test_name)))
+                                         format(tst_name)))
                             continue
-                        if tag in chart_tags[test_name]:
+                        if tag in chart_tags[tst_name]:
                             message = "index: {index}, test: {test}".format(
-                                index=index, test=test_name)
-                            test_name = test_name.split('.')[-1]
+                                index=index, test=tst_name)
                             try:
                                 trace, rslt = _generate_trending_traces(
                                     test_data,
                                     job_name=job_name,
                                     build_info=build_info,
-                                    name='-'.join(test_name.split('-')[2:-1]),
+                                    name='-'.join(tst_name.split('.')[-1].
+                                                  split('-')[2:-1]),
                                     color=COLORS[index])
                             except IndexError:
                                 message = "Out of colors: {}".format(message)
@@ -408,25 +404,24 @@ def _generate_all_charts(spec, input_data):
                                 continue
                             traces.extend(trace)
                             visible.extend([True for _ in range(len(trace))])
-                            res.append(rslt)
+                            res[tst_name] = rslt
                             index += 1
                             break
                 visibility.append(visible)
         else:
-            for test_name, test_data in chart_data.items():
+            for tst_name, test_data in chart_data.items():
                 if not test_data:
                     logs.append(("WARNING", "No data for the test '{0}'".
-                                 format(test_name)))
+                                 format(tst_name)))
                     continue
                 message = "index: {index}, test: {test}".format(
-                    index=index, test=test_name)
-                test_name = test_name.split('.')[-1]
+                    index=index, test=tst_name)
                 try:
                     trace, rslt = _generate_trending_traces(
                         test_data,
                         job_name=job_name,
                         build_info=build_info,
-                        name='-'.join(test_name.split('-')[2:-1]),
+                        name='-'.join(tst_name.split('.')[-1].split('-')[2:-1]),
                         color=COLORS[index])
                 except IndexError:
                     message = "Out of colors: {}".format(message)
@@ -435,7 +430,7 @@ def _generate_all_charts(spec, input_data):
                     index += 1
                     continue
                 traces.extend(trace)
-                res.append(rslt)
+                res[tst_name] = rslt
                 index += 1

         if traces:
@@ -498,13 +493,19 @@ def _generate_all_charts(spec, input_data):
             except plerr.PlotlyEmptyDataError:
                 logs.append(("WARNING", "No data for the plot. Skipped."))

-        data_out = {
-            "job_name": job_name,
-            "csv_table": csv_tbl,
-            "results": res,
-            "logs": logs
-        }
-        data_q.put(data_out)
+        for level, line in logs:
+            if level == "INFO":
+                logging.info(line)
+            elif level == "ERROR":
+                logging.error(line)
+            elif level == "DEBUG":
+                logging.debug(line)
+            elif level == "CRITICAL":
+                logging.critical(line)
+            elif level == "WARNING":
+                logging.warning(line)
+
+        return {"job_name": job_name, "csv_table": csv_tbl, "results": res}

     builds_dict = dict()
     for job in spec.input["builds"].keys():
@@ -533,27 +534,7 @@ def _generate_all_charts(spec, input_data):
                 testbed
             )

-    work_queue = multiprocessing.JoinableQueue()
-    manager = multiprocessing.Manager()
-    data_queue = manager.Queue()
-    cpus = multiprocessing.cpu_count()
-
-    workers = list()
-    for cpu in range(cpus):
-        worker = Worker(work_queue,
-                        data_queue,
-                        _generate_chart)
-        worker.daemon = True
-        worker.start()
-        workers.append(worker)
-        os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
-                  format(cpu, worker.pid))
-
-    for chart in spec.cpta["plots"]:
-        work_queue.put((chart, ))
-    work_queue.join()
-
-    anomaly_classifications = list()
+    anomaly_classifications = dict()

     # Create the header:
     csv_tables = dict()
@@ -569,30 +550,14 @@ def _generate_all_charts(spec, input_data):
         header = "Version:," + ",".join(versions) + '\n'
         csv_tables[job_name].append(header)

-    while not data_queue.empty():
-        result = data_queue.get()
+    for chart in spec.cpta["plots"]:
+        result = _generate_chart(chart)

-        anomaly_classifications.extend(result["results"])
         csv_tables[result["job_name"]].extend(result["csv_table"])

-        for item in result["logs"]:
-            if item[0] == "INFO":
-                logging.info(item[1])
-            elif item[0] == "ERROR":
-                logging.error(item[1])
-            elif item[0] == "DEBUG":
-                logging.debug(item[1])
-            elif item[0] == "CRITICAL":
-                logging.critical(item[1])
-            elif item[0] == "WARNING":
-                logging.warning(item[1])
-
-    del data_queue
-
-    # Terminate all workers
-    for worker in workers:
-        worker.terminate()
-        worker.join()
+        if anomaly_classifications.get(result["job_name"], None) is None:
+            anomaly_classifications[result["job_name"]] = dict()
+        anomaly_classifications[result["job_name"]].update(result["results"])

     # Write the tables:
     for job_name, csv_table in csv_tables.items():
@@ -627,10 +592,22 @@ def _generate_all_charts(spec, input_data):
     # Evaluate result:
     if anomaly_classifications:
         result = "PASS"
-        for classification in anomaly_classifications:
-            if classification == "regression" or classification == "outlier":
-                result = "FAIL"
-                break
+        for job_name, job_data in anomaly_classifications.iteritems():
+            file_name = "{0}-regressions-{1}.txt".\
+                format(spec.cpta["output-file"], job_name)
+            with open(file_name, 'w') as txt_file:
+                for test_name, classification in job_data.iteritems():
+                    if classification == "regression":
+                        txt_file.write(test_name + '\n')
+                    if classification == "regression" or \
+                            classification == "outlier":
+                        result = "FAIL"
+            file_name = "{0}-progressions-{1}.txt".\
+                format(spec.cpta["output-file"], job_name)
+            with open(file_name, 'w') as txt_file:
+                for test_name, classification in job_data.iteritems():
+                    if classification == "progression":
+                        txt_file.write(test_name + '\n')
     else:
         result = "FAIL"
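
The behavioural core of this change: _generate_chart() now runs serially, flushes its buffered log records itself, and returns a dictionary per chart; the caller merges those results into anomaly_classifications[job_name][test_name] and writes per-job regression and progression lists before deriving the PASS/FAIL verdict. Below is a minimal, standalone sketch of that aggregation and evaluation flow; the sample chart results and the "trending" file-name prefix are illustrative stand-ins (the patch builds the prefix from spec.cpta["output-file"] and, being Python 2 code, uses iteritems() where the sketch uses items()).

    # Illustrative sketch only -- not part of the patch.
    anomaly_classifications = dict()

    # Each _generate_chart() call returns {"job_name": ..., "results": {test: classification}}.
    chart_results = [
        {"job_name": "job-a", "results": {"test-1": "regression", "test-2": "normal"}},
        {"job_name": "job-a", "results": {"test-3": "progression"}},
    ]
    for result in chart_results:
        job = result["job_name"]
        # Merge the results of all charts into one classification dict per job.
        anomaly_classifications.setdefault(job, dict()).update(result["results"])

    overall = "PASS"
    for job_name, job_data in anomaly_classifications.items():
        # One regressions file and one progressions file per job.
        with open("trending-regressions-{0}.txt".format(job_name), "w") as txt_file:
            for test_name, classification in job_data.items():
                if classification == "regression":
                    txt_file.write(test_name + "\n")
                if classification in ("regression", "outlier"):
                    overall = "FAIL"
        with open("trending-progressions-{0}.txt".format(job_name), "w") as txt_file:
            for test_name, classification in job_data.items():
                if classification == "progression":
                    txt_file.write(test_name + "\n")
    print(overall)

Serialising the chart generation this way removes the Worker/taskset process plumbing and lets the buffered log records reach the standard logging module in a deterministic order.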