diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py
index 37532c83d2..9c0e38073c 100644
--- a/resources/tools/presentation/input_data_parser.py
+++ b/resources/tools/presentation/input_data_parser.py
@@ -23,7 +23,6 @@ import multiprocessing
 import os
 import re
 import resource
-import objgraph
 import pandas as pd
 
 import logging
@@ -1189,10 +1188,13 @@ class InputData(object):
 
         return checker.data
 
-    def _download_and_parse_build(self, job, build, repeat, pid=10000):
+    def _download_and_parse_build(self, pid, data_queue, job, build, repeat):
         """Download and parse the input data file.
 
         :param pid: PID of the process executing this method.
+        :param data_queue: Shared memory between processes; a queue which
+            keeps the result data. The main process later reads this data and
+            uses it in further processing.
         :param job: Name of the Jenkins job which generated the processed input
             file.
         :param build: Information about the Jenkins build which generated the
@@ -1200,6 +1202,7 @@ class InputData(object):
         :param repeat: Repeat the download specified number of times if not
             successful.
         :type pid: int
+        :type data_queue: multiprocessing.Manager().Queue()
         :type job: str
         :type build: dict
         :type repeat: int
@@ -1280,6 +1283,14 @@ class InputData(object):
                             format(full_name, repr(err))))
         logs.append(("INFO", "  Done."))
 
+        result = {
+            "data": data,
+            "state": state,
+            "job": job,
+            "build": build
+        }
+        data_queue.put(result)
+
         for level, line in logs:
             if level == "INFO":
                 logging.info(line)
@@ -1292,7 +1303,8 @@ class InputData(object):
             elif level == "WARNING":
                 logging.warning(line)
 
-        return {"data": data, "state": state, "job": job, "build": build}
+        logging.info("Memory allocation: {0:,d}MB".format(
+            resource.getrusage(resource.RUSAGE_SELF).ru_maxrss // 1000))
 
     def download_and_parse_data(self, repeat=1):
         """Download the input data files, parse input data from input files and
@@ -1305,36 +1317,67 @@ class InputData(object):
 
         logging.info("Downloading and parsing input files ...")
 
+        work_queue = multiprocessing.JoinableQueue()
+        manager = multiprocessing.Manager()
+        data_queue = manager.Queue()
+        cpus = multiprocessing.cpu_count()
+
+        workers = list()
+        for cpu in range(cpus):
+            worker = Worker(work_queue,
+                            data_queue,
+                            self._download_and_parse_build)
+            worker.daemon = True
+            worker.start()
+            workers.append(worker)
+            os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
+                      format(cpu, worker.pid))
+
         for job, builds in self._cfg.builds.items():
             for build in builds:
+                work_queue.put((job, build, repeat))
+
+        work_queue.join()
+
+        logging.info("Done.")
+
+        logging.info("Collecting data:")
+
+        while not data_queue.empty():
+            result = data_queue.get()
+
+            job = result["job"]
+            build_nr = result["build"]["build"]
+            logging.info("  {job}-{build}".format(job=job, build=build_nr))
 
-                result = self._download_and_parse_build(job, build, repeat)
-                build_nr = result["build"]["build"]
+            if result["data"]:
+                data = result["data"]
+                build_data = pd.Series({
+                    "metadata": pd.Series(
+                        data["metadata"].values(),
+                        index=data["metadata"].keys()),
+                    "suites": pd.Series(data["suites"].values(),
+                                        index=data["suites"].keys()),
+                    "tests": pd.Series(data["tests"].values(),
+                                       index=data["tests"].keys())})
 
-                if result["data"]:
-                    data = result["data"]
-                    build_data = pd.Series({
-                        "metadata": pd.Series(
-                            data["metadata"].values(),
-                            index=data["metadata"].keys()),
-                        "suites": pd.Series(data["suites"].values(),
-                                            index=data["suites"].keys()),
-                        "tests": pd.Series(data["tests"].values(),
-                                           index=data["tests"].keys())})
+                if self._input_data.get(job, None) is None:
+                    self._input_data[job] = pd.Series()
+                self._input_data[job][str(build_nr)] = build_data
 
-                    if self._input_data.get(job, None) is None:
-                        self._input_data[job] = pd.Series()
-                    self._input_data[job][str(build_nr)] = build_data
+                self._cfg.set_input_file_name(
+                    job, build_nr, result["build"]["file-name"])
 
-                    self._cfg.set_input_file_name(
-                        job, build_nr, result["build"]["file-name"])
+                self._cfg.set_input_state(job, build_nr, result["state"])
 
-                    self._cfg.set_input_state(job, build_nr, result["state"])
+        logging.info("Memory allocation: {0:,d}MB".format(
+            resource.getrusage(resource.RUSAGE_SELF).ru_maxrss // 1000))
 
-        logging.info("ru_maxrss = {0}".format(
-            resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))
+        del data_queue
 
-        logging.info(objgraph.most_common_types())
+        # Terminate all workers
+        for worker in workers:
+            worker.terminate()
+            worker.join()
 
         logging.info("Done.")
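
Note: the Worker class instantiated in download_and_parse_data() above is not
part of this diff; it is defined elsewhere in the presentation tooling. The
following is a minimal, hypothetical sketch of the consumer shape the diff
assumes: a multiprocessing.Process that pops (job, build, repeat) tuples from
the joinable work queue, calls the supplied function with its own PID and the
shared result queue, and acknowledges every item with task_done() so that
work_queue.join() in the main process can return.

import multiprocessing


class Worker(multiprocessing.Process):
    """Hypothetical sketch of the Worker consumer used above; the real
    implementation lives outside this diff.
    """

    def __init__(self, work_queue, data_queue, func):
        super(Worker, self).__init__()
        self._work_queue = work_queue  # multiprocessing.JoinableQueue
        self._data_queue = data_queue  # multiprocessing.Manager().Queue()
        self._func = func              # here: _download_and_parse_build

    def run(self):
        while True:
            try:
                # Blocks until the main process puts a work item; the item
                # matches what download_and_parse_data() enqueues.
                job, build, repeat = self._work_queue.get()
                # Prepend this worker's PID and the shared result queue,
                # matching _download_and_parse_build(pid, data_queue, job,
                # build, repeat); the result travels back via data_queue.
                self._func(self.pid, self._data_queue, job, build, repeat)
            finally:
                # Exactly one task_done() per get(); otherwise
                # work_queue.join() in the main process never returns.
                self._work_queue.task_done()

With a consumer of this shape, the run() loop never exits on its own, which
is why the diff marks the workers as daemons and why the main process calls
worker.terminate() and worker.join() once work_queue.join() has returned and
data_queue has been drained.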