X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Ftools%2Fpresentation%2Finput_data_parser.py;h=fc56af75e2f0ebfe9da8f8b3c1869c4945fcf805;hp=84245c429ce491f497c588b4b6ccfa38e6663d4d;hb=391a0738554e19b6f5a08991cde6061ed4f18a79;hpb=5aa2e158983f61df6d26129ad566b906051fc5b5

diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py
index 84245c429c..fc56af75e2 100644
--- a/resources/tools/presentation/input_data_parser.py
+++ b/resources/tools/presentation/input_data_parser.py
@@ -19,11 +19,11 @@
 - filter the data using tags,
 """
 
-import multiprocessing
-import os
 import re
+import resource
 
 import pandas as pd
 import logging
+import prettytable
 
 from robot.api import ExecutionResult, ResultVisitor
 from robot import errors
@@ -37,7 +37,6 @@ from json import loads
 from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
 
 from input_data_files import download_and_unzip_data_file
-from utils import Worker
 
 
 # Separator used in file names
@@ -282,8 +281,7 @@ class ExecutionChecker(ResultVisitor):
     REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
                                    r"VPP Version:\s*|VPP version:\s*)(.*)")
 
-    REGEX_VERSION_DPDK = re.compile(r"(return STDOUT testpmd)([\d\D\n]*)"
-                                    r"(RTE Version: 'DPDK )(.*)(')")
+    REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")
 
     REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')
 
@@ -389,14 +387,12 @@ class ExecutionChecker(ResultVisitor):
         :returns: Nothing.
         """
 
-        if msg.message.count("Arguments:"):
-            message = str(msg.message).replace(' ', '').replace('\n', '').\
-                replace("'", '"').replace('b"', '"').\
-                replace("honeycom", "honeycomb")
-            message = loads(message[11:-1])
+        if msg.message.count("Setup of TG node"):
+            reg_tg_ip = re.compile(
+                r'Setup of TG node (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}) done')
             try:
-                self._testbed = message["TG"]["host"]
-            except (KeyError, ValueError):
+                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
+            except (KeyError, ValueError, IndexError, AttributeError):
                 pass
             finally:
                 self._data["metadata"]["testbed"] = self._testbed
@@ -426,10 +422,10 @@ class ExecutionChecker(ResultVisitor):
         :returns: Nothing.
         """
 
-        if msg.message.count("return STDOUT testpmd"):
+        if msg.message.count("DPDK Version:"):
             try:
                 self._version = str(re.search(
-                    self.REGEX_VERSION_DPDK, msg.message). group(4))
+                    self.REGEX_VERSION_DPDK, msg.message). group(2))
                 self._data["metadata"]["version"] = self._version
             except IndexError:
                 pass
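The two rewritten patterns above can be sanity-checked in isolation. The snippet below is an illustrative sketch, not part of the patch, and the sample messages are invented. Note that group(2) carries the DPDK version because group(1) only captures the "DPDK version:"/"DPDK Version:" label, and that the dots inside the IP capture are unescaped, so each also matches any single character (harmless here thanks to the surrounding literal text).

    import re

    # Patterns as introduced by the hunks above.
    REG_TG_IP = re.compile(
        r'Setup of TG node (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}) done')
    REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")

    # Invented log messages, for illustration only.
    match = re.search(REG_TG_IP, "Setup of TG node 10.30.51.16 done")
    if match is not None:
        print(match.group(1))    # -> 10.30.51.16

    match = re.search(REGEX_VERSION_DPDK, "DPDK Version: 18.02.0")
    if match is not None:
        print(match.group(2))    # -> 18.02.0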
""" - if msg.message.count("Thread 0 vpp_main"): + if msg.message.count("Runtime:"): self._show_run_lookup_nr += 1 if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1: self._data["tests"][self._test_ID]["show-run"] = str() if self._lookup_kw_nr > 1: self._msg_type = None - if self._show_run_lookup_nr == 1: - text = msg.message.replace("vat# ", "").\ - replace("return STDOUT ", "").replace("\n\n", "\n").\ - replace('\n', ' |br| ').\ - replace('\r', '').replace('"', "'") + if self._show_run_lookup_nr > 0: + message = str(msg.message).replace(' ', '').replace('\n', '').\ + replace("'", '"').replace('b"', '"').replace('u"', '"')[8:] + runtime = loads(message) + try: + threads_nr = len(runtime[0]["clocks"]) + except (IndexError, KeyError): + return + tbl_hdr = ["Name", "Calls", "Vectors", "Suspends", "Clocks"] + table = [[tbl_hdr, ] for _ in range(threads_nr)] + for item in runtime: + for idx in range(threads_nr): + table[idx].append([ + item["name"], + item["calls"][idx], + item["vectors"][idx], + item["suspends"][idx], + item["clocks"][idx] + ]) + text = "" + for idx in range(threads_nr): + text += "Thread {idx} ".format(idx=idx) + text += "vpp_main\n" if idx == 0 else \ + "vpp_wk_{idx}\n".format(idx=idx-1) + txt_table = None + for row in table[idx]: + if txt_table is None: + txt_table = prettytable.PrettyTable(row) + else: + if any(row[1:]): + txt_table.add_row(row) + txt_table.set_style(prettytable.MSWORD_FRIENDLY) + txt_table.align["Name"] = "l" + txt_table.align["Calls"] = "r" + txt_table.align["Vectors"] = "r" + txt_table.align["Suspends"] = "r" + txt_table.align["Clocks"] = "r" + + text += txt_table.get_string(sortby="Name") + '\n' + + text = text.replace('\n', ' |br| ').replace('\r', '').\ + replace('"', "'") try: self._data["tests"][self._test_ID]["show-run"] += " |br| " self._data["tests"][self._test_ID]["show-run"] += \ - "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text + "**DUT" + str(self._show_run_lookup_nr) + ":** |br| " \ + + text except KeyError: pass @@ -948,7 +982,7 @@ class ExecutionChecker(ResultVisitor): self._lookup_kw_nr += 1 self._show_run_lookup_nr = 0 self._msg_type = "test-show-runtime" - elif test_kw.name.count("Start The L2fwd Test") and not self._version: + elif test_kw.name.count("Install Dpdk Test") and not self._version: self._msg_type = "dpdk-version" else: return @@ -1187,13 +1221,10 @@ class InputData(object): return checker.data - def _download_and_parse_build(self, pid, data_queue, job, build, repeat): + def _download_and_parse_build(self, job, build, repeat, pid=10000): """Download and parse the input data file. :param pid: PID of the process executing this method. - :param data_queue: Shared memory between processes. Queue which keeps - the result data. This data is then read by the main process and used - in further processing. :param job: Name of the Jenkins job which generated the processed input file. :param build: Information about the Jenkins build which generated the @@ -1201,7 +1232,6 @@ class InputData(object): :param repeat: Repeat the download specified number of times if not successful. :type pid: int - :type data_queue: multiprocessing.Manager().Queue() :type job: str :type build: dict :type repeat: int @@ -1266,11 +1296,11 @@ class InputData(object): file_name = self._cfg.input["file-name"] full_name = join( self._cfg.environment["paths"]["DIR[WORKING,DATA]"], - "{job}{sep}{build}{sep}{name}". 
@@ -1266,11 +1296,11 @@ class InputData(object):
             file_name = self._cfg.input["file-name"]
             full_name = join(
                 self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
-                "{job}{sep}{build}{sep}{name}".
-                format(job=job,
-                       sep=SEPARATOR,
-                       build=build["build"],
-                       name=file_name))
+                "{job}{sep}{build}{sep}{name}".format(
+                    job=job,
+                    sep=SEPARATOR,
+                    build=build["build"],
+                    name=file_name))
             try:
                 remove(full_name)
                 logs.append(("INFO",
@@ -1278,8 +1308,8 @@ class InputData(object):
                              format(name=full_name)))
             except OSError as err:
                 logs.append(("ERROR",
-                            "Cannot remove the file '{0}': {1}".
-                            format(full_name, repr(err))))
+                             "Cannot remove the file '{0}': {1}".
+                             format(full_name, repr(err))))
             logs.append(("INFO", "  Done."))
 
         for level, line in logs:
@@ -1294,13 +1324,7 @@ class InputData(object):
             elif level == "WARNING":
                 logging.warning(line)
 
-        result = {
-            "data": data,
-            "state": state,
-            "job": job,
-            "build": build
-        }
-        data_queue.put(result)
+        return {"data": data, "state": state, "job": job, "build": build}
 
     def download_and_parse_data(self, repeat=1):
         """Download the input data files, parse input data from input files and
@@ -1313,61 +1337,34 @@ class InputData(object):
 
         logging.info("Downloading and parsing input files ...")
 
-        work_queue = multiprocessing.JoinableQueue()
-        manager = multiprocessing.Manager()
-        data_queue = manager.Queue()
-        cpus = multiprocessing.cpu_count()
-
-        workers = list()
-        for cpu in range(cpus):
-            worker = Worker(work_queue,
-                            data_queue,
-                            self._download_and_parse_build)
-            worker.daemon = True
-            worker.start()
-            workers.append(worker)
-            os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
-                      format(cpu, worker.pid))
-
         for job, builds in self._cfg.builds.items():
             for build in builds:
-                work_queue.put((job, build, repeat))
-        work_queue.join()
-
-        logging.info("Done.")
+                result = self._download_and_parse_build(job, build, repeat)
+                build_nr = result["build"]["build"]
 
-        while not data_queue.empty():
-            result = data_queue.get()
+                if result["data"]:
+                    data = result["data"]
+                    build_data = pd.Series({
+                        "metadata": pd.Series(
+                            data["metadata"].values(),
+                            index=data["metadata"].keys()),
+                        "suites": pd.Series(data["suites"].values(),
+                                            index=data["suites"].keys()),
+                        "tests": pd.Series(data["tests"].values(),
+                                           index=data["tests"].keys())})
 
-            job = result["job"]
-            build_nr = result["build"]["build"]
+                    if self._input_data.get(job, None) is None:
+                        self._input_data[job] = pd.Series()
+                    self._input_data[job][str(build_nr)] = build_data
 
-            if result["data"]:
-                data = result["data"]
-                build_data = pd.Series({
-                    "metadata": pd.Series(data["metadata"].values(),
-                                          index=data["metadata"].keys()),
-                    "suites": pd.Series(data["suites"].values(),
-                                        index=data["suites"].keys()),
-                    "tests": pd.Series(data["tests"].values(),
-                                       index=data["tests"].keys())})
+                    self._cfg.set_input_file_name(
+                        job, build_nr, result["build"]["file-name"])
 
-                if self._input_data.get(job, None) is None:
-                    self._input_data[job] = pd.Series()
-                self._input_data[job][str(build_nr)] = build_data
+                    self._cfg.set_input_state(job, build_nr, result["state"])
 
-                self._cfg.set_input_file_name(job, build_nr,
-                                              result["build"]["file-name"])
-
-                self._cfg.set_input_state(job, build_nr, result["state"])
-
-        del data_queue
-
-        # Terminate all workers
-        for worker in workers:
-            worker.terminate()
-            worker.join()
+        logging.info("Memory allocation: {0:,d}MB".format(
+            resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))
 
         logging.info("Done.")
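Two remarks on the serialized loop above. First, the nested pandas structure it builds can be illustrated standalone; the job and build names below are invented, and list() wrappers are added so the sketch also runs on Python 3, where dict views are lazy:

    import pandas as pd

    # Invented parsed data, in the shape returned by
    # _download_and_parse_build()["data"].
    data = {
        "metadata": {"version": "18.04-rc0", "testbed": "10.30.51.16"},
        "suites": {"suite-1": {"doc": "..."}},
        "tests": {"test-1": {"status": "PASS"}},
    }

    build_data = pd.Series({
        "metadata": pd.Series(list(data["metadata"].values()),
                              index=list(data["metadata"].keys())),
        "suites": pd.Series(list(data["suites"].values()),
                            index=list(data["suites"].keys())),
        "tests": pd.Series(list(data["tests"].values()),
                           index=list(data["tests"].keys()))})

    input_data = {}                                # maps job name -> pd.Series
    job, build_nr = "csit-vpp-perf-example", 1     # invented names
    if input_data.get(job, None) is None:
        input_data[job] = pd.Series(dtype=object)
    input_data[job][str(build_nr)] = build_data

    print(input_data[job]["1"]["metadata"]["version"])   # -> 18.04-rc0

Second, on the new memory log line: on Linux, ru_maxrss from resource.getrusage() is reported in kilobytes (on macOS it is bytes), so dividing by 1000 yields only an approximate megabyte figure, and on Python 3 the division would need to be integer (//) for the ',d' format specifier to accept the result.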
@@ -1501,6 +1498,96 @@ class InputData(object):
                           "tags are enclosed by apostrophes.".format(cond))
             return None
 
+    def filter_tests_by_name(self, element, params=None, data_set="tests",
+                             continue_on_error=False):
+        """Filter required data from the given jobs and builds.
+
+        The output data structure is:
+
+        - job 1
+          - build 1
+            - test (or suite) 1 ID:
+              - param 1
+              - param 2
+              ...
+              - param n
+            ...
+            - test (or suite) n ID:
+            ...
+          ...
+          - build n
+        ...
+        - job n
+
+        :param element: Element which will use the filtered data.
+        :param params: Parameters which will be included in the output. If
+            None, all parameters are included.
+        :param data_set: The set of data to be filtered: tests, suites,
+            metadata.
+        :param continue_on_error: Continue if there is an error while reading
+            the data. The affected item is left empty in that case.
+        :type element: pandas.Series
+        :type params: list
+        :type data_set: str
+        :type continue_on_error: bool
+        :returns: Filtered data.
+        :rtype: pandas.Series
+        """
+
+        include = element.get("include", None)
+        if not include:
+            logging.warning("No tests to include, skipping the element.")
+            return None
+
+        if params is None:
+            params = element.get("parameters", None)
+            if params:
+                params.append("type")
+
+        data = pd.Series()
+        try:
+            for job, builds in element["data"].items():
+                data[job] = pd.Series()
+                for build in builds:
+                    data[job][str(build)] = pd.Series()
+                    for test in include:
+                        try:
+                            reg_ex = re.compile(str(test).lower())
+                            for test_ID in self.data[job][str(build)]\
+                                    [data_set].keys():
+                                if re.match(reg_ex, str(test_ID).lower()):
+                                    test_data = self.data[job][str(build)]\
+                                        [data_set][test_ID]
+                                    data[job][str(build)][test_ID] = pd.Series()
+                                    if params is None:
+                                        for param, val in test_data.items():
+                                            data[job][str(build)][test_ID]\
+                                                [param] = val
+                                    else:
+                                        for param in params:
+                                            try:
+                                                data[job][str(build)][test_ID]\
+                                                    [param] = test_data[param]
+                                            except KeyError:
+                                                data[job][str(build)][test_ID]\
+                                                    [param] = "No Data"
+                        except KeyError as err:
+                            logging.error("{err!r}".format(err=err))
+                            if continue_on_error:
+                                continue
+                            else:
+                                return None
+            return data
+
+        except (KeyError, IndexError, ValueError) as err:
+            logging.error("Missing mandatory parameter in the element "
+                          "specification: {err!r}".format(err=err))
+            return None
+        except AttributeError as err:
+            logging.error("{err!r}".format(err=err))
+            return None
+
     @staticmethod
     def merge_data(data):
         """Merge data from more jobs and builds to a simple data structure.
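The 'include' entries in the new filter_tests_by_name() are treated as regular expressions, lower-cased and anchored at the start of each test ID via re.match. A small illustration of that matching rule, with invented test IDs; this complements the existing tag-based filter whose error message is visible in the hunk's leading context:

    import re

    # Invented test IDs and one 'include' pattern as they might appear in an
    # element specification; re.match anchors the pattern at the start.
    test_ids = [
        "tests.vpp.perf.ip4.10ge2p1x520-ethip4-ip4base-ndrpdr",
        "tests.vpp.perf.l2.10ge2p1x520-eth-l2xcbase-ndrpdr",
    ]
    include = [r"tests\.vpp\.perf\.ip4\..*ndrpdr"]

    for test in include:
        reg_ex = re.compile(str(test).lower())
        for test_id in test_ids:
            if re.match(reg_ex, str(test_id).lower()):
                print("selected: " + test_id)    # matches only the ip4 test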