X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=resources%2Ftools%2Fpresentation%2Finput_data_parser.py;h=c63e3eb7828d80677af9ac741c548d0550317dbc;hb=refs%2Fchanges%2F25%2F22525%2F1;hp=bbbf0a9ae09a241c24ffd4d9ccec301b48122fa0;hpb=c093f3f8a6c116c1897bb5f6793e99111ddb0682;p=csit.git

diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py
index bbbf0a9ae0..c63e3eb782 100644
--- a/resources/tools/presentation/input_data_parser.py
+++ b/resources/tools/presentation/input_data_parser.py
@@ -283,7 +283,7 @@ class ExecutionChecker(ResultVisitor):
     REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")
 
-    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')
+    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s(\d*).*$')
 
     REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                            r'tx\s(\d*),\srx\s(\d*)')
@@ -291,13 +291,16 @@ class ExecutionChecker(ResultVisitor):
     REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                             r' in packets per second: \[(.*)\]')
 
+    REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')
+
+    REGEX_RECONF_TIME = re.compile(r'Implied time lost: (\d*.[\de-]*)')
+
     REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
 
     REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
 
     REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
 
-    REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
+    REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
 
     def __init__(self, metadata, mapping, ignore):
         """Initialisation.
@@ -457,7 +460,7 @@ class ExecutionChecker(ResultVisitor):
                 self._data["tests"][self._test_ID]["conf-history"] = str()
             else:
                 self._msg_type = None
-            text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
+            text = re.sub("\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
                           "VAT command history:", "", msg.message, count=1). \
                 replace("\n\n", "\n").replace('\n', ' |br| ').\
                 replace('\r', '').replace('"', "'")
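For reference, the two REGEX_RECONF_* patterns introduced in the @@ -291,13 +291,16 @@ hunk above expect the reconf test message to carry an integer packet count and a float, possibly in scientific notation (the unescaped dot in REGEX_RECONF_TIME matches the literal decimal point here, though strictly it matches any character). A minimal stand-alone sketch of the intended parsing; the sample message is illustrative, not taken from a real run:

    import re

    # Patterns copied verbatim from the diff above.
    REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')
    REGEX_RECONF_TIME = re.compile(r'Implied time lost: (\d*.[\de-]*)')

    # Hypothetical message in the format the patterns expect.
    msg = "Packets lost due to reconfig: 13472\nImplied time lost: 1.8e-03"

    loss = int(REGEX_RECONF_LOSS.search(msg).group(1))         # -> 13472
    time_lost = float(REGEX_RECONF_TIME.search(msg).group(1))  # -> 0.0018

The same int()/float() conversion, guarded by a try/except, is applied by the new RECONF branch in the @@ -907,6 +915,18 @@ hunk below.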
@@ -479,7 +482,7 @@ class ExecutionChecker(ResultVisitor):
                 self._data["tests"][self._test_ID]["conf-history"] = str()
             else:
                 self._msg_type = None
-            text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
+            text = re.sub("\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
                           "PAPI command history:", "", msg.message, count=1). \
                 replace("\n\n", "\n").replace('\n', ' |br| ').\
                 replace('\r', '').replace('"', "'")
@@ -502,7 +505,7 @@ class ExecutionChecker(ResultVisitor):
             self._data["tests"][self._test_ID]["show-run"] = str()
         if self._lookup_kw_nr > 1:
             self._msg_type = None
-        if self._show_run_lookup_nr == 1:
+        if self._show_run_lookup_nr > 0:
             message = str(msg.message).replace(' ', '').replace('\n', '').\
                 replace("'", '"').replace('b"', '"').replace('u"', '"')[8:]
             runtime = loads(message)
@@ -547,7 +550,8 @@ class ExecutionChecker(ResultVisitor):
             try:
                 self._data["tests"][self._test_ID]["show-run"] += " |br| "
                 self._data["tests"][self._test_ID]["show-run"] += \
-                    "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
+                    "**DUT" + str(self._show_run_lookup_nr) + ":** |br| " \
+                    + text
             except KeyError:
                 pass
@@ -801,6 +805,7 @@ class ExecutionChecker(ResultVisitor):
             groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
             if not groups:
                 tag_count = 0
+                tag_tc = str()
                 for tag in test_result["tags"]:
                     groups = re.search(self.REGEX_TC_TAG, tag)
                     if groups:
@@ -829,7 +834,8 @@
                 "SOAK" in tags or
                 "TCP" in tags or
                 "MRR" in tags or
-                "BMRR" in tags):
+                "BMRR" in tags or
+                "RECONF" in tags):
             # TODO: Remove when definitely no NDRPDRDISC tests are used:
             if "NDRDISC" in tags:
                 test_result["type"] = "NDR"
@@ -846,6 +852,8 @@
                 test_result["type"] = "MRR"
             elif "FRMOBL" in tags or "BMRR" in tags:
                 test_result["type"] = "BMRR"
+            elif "RECONF" in tags:
+                test_result["type"] = "RECONF"
             else:
                 test_result["status"] = "FAIL"
                 self._data["tests"][self._test_ID] = test_result
@@ -907,6 +915,18 @@ class ExecutionChecker(ResultVisitor):
                     AvgStdevMetadataFactory.from_data([
                         float(groups.group(3)) / float(groups.group(1)), ])
+            elif test_result["type"] == "RECONF":
+                test_result["result"] = None
+                try:
+                    grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
+                    grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
+                    test_result["result"] = {
+                        "loss": int(grps_loss.group(1)),
+                        "time": float(grps_time.group(1))
+                    }
+                except (AttributeError, IndexError, ValueError, TypeError):
+                    test_result["status"] = "FAIL"
+
         self._data["tests"][self._test_ID] = test_result
 
     def end_test(self, test):
@@ -1405,7 +1425,7 @@ class InputData(object):
                 index += 1
             tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
 
-    def filter_data(self, element, params=None, data_set="tests",
+    def filter_data(self, element, params=None, data=None, data_set="tests",
                     continue_on_error=False):
         """Filter required data from the given jobs and builds.
 
@@ -1428,13 +1448,16 @@ class InputData(object):
 
         :param element: Element which will use the filtered data.
         :param params: Parameters which will be included in the output. If None,
-        all parameters are included.
+            all parameters are included.
+        :param data: If not None, this data is used instead of data specified
+            in the element.
         :param data_set: The set of data to be filtered: tests, suites,
-        metadata.
+            metadata.
         :param continue_on_error: Continue if there is error while reading the
-        data. The Item will be empty then
+            data. The Item will be empty then
         :type element: pandas.Series
         :type params: list
+        :type data: dict
         :type data_set: str
         :type continue_on_error: bool
         :returns: Filtered data.
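The new `data` argument documented above lets a caller supply the job/build data set directly instead of taking it from the element specification; the hunk that follows implements it as `data_to_filter = data if data else element["data"]`. A minimal usage sketch; the InputData instance, the element and the job name are hypothetical:

    # Override the jobs/builds listed in element["data"] for one call.
    override = {"csit-vpp-perf-mrr-daily-master": [1127, 1128]}
    filtered = input_data.filter_data(element, params=["type"],
                                      data=override)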
@@ -1456,9 +1479,10 @@ class InputData(object):
             if params:
                 params.append("type")
 
+        data_to_filter = data if data else element["data"]
         data = pd.Series()
         try:
-            for job, builds in element["data"].items():
+            for job, builds in data_to_filter.items():
                 data[job] = pd.Series()
                 for build in builds:
                     data[job][str(build)] = pd.Series()
@@ -1497,6 +1521,96 @@ class InputData(object):
                         "tags are enclosed by apostrophes.".format(cond))
             return None
 
+    def filter_tests_by_name(self, element, params=None, data_set="tests",
+                             continue_on_error=False):
+        """Filter required data from the given jobs and builds.
+
+        The output data structure is:
+
+        - job 1
+          - build 1
+            - test (or suite) 1 ID:
+              - param 1
+              - param 2
+              ...
+              - param n
+            ...
+            - test (or suite) n ID:
+            ...
+          ...
+          - build n
+        ...
+        - job n
+
+        :param element: Element which will use the filtered data.
+        :param params: Parameters which will be included in the output. If None,
+            all parameters are included.
+        :param data_set: The set of data to be filtered: tests, suites,
+            metadata.
+        :param continue_on_error: Continue if there is error while reading the
+            data. The Item will be empty then
+        :type element: pandas.Series
+        :type params: list
+        :type data_set: str
+        :type continue_on_error: bool
+        :returns: Filtered data.
+        :rtype pandas.Series
+        """
+
+        include = element.get("include", None)
+        if not include:
+            logging.warning("No tests to include, skipping the element.")
+            return None
+
+        if params is None:
+            params = element.get("parameters", None)
+            if params:
+                params.append("type")
+
+        data = pd.Series()
+        try:
+            for job, builds in element["data"].items():
+                data[job] = pd.Series()
+                for build in builds:
+                    data[job][str(build)] = pd.Series()
+                    for test in include:
+                        try:
+                            reg_ex = re.compile(str(test).lower())
+                            for test_ID in self.data[job][str(build)]\
+                                    [data_set].keys():
+                                if re.match(reg_ex, str(test_ID).lower()):
+                                    test_data = self.data[job][str(build)]\
+                                        [data_set][test_ID]
+                                    data[job][str(build)][test_ID] = pd.Series()
+                                    if params is None:
+                                        for param, val in test_data.items():
+                                            data[job][str(build)][test_ID]\
+                                                [param] = val
+                                    else:
+                                        for param in params:
+                                            try:
+                                                data[job][str(build)][test_ID]\
+                                                    [param] = test_data[param]
+                                            except KeyError:
+                                                data[job][str(build)][test_ID]\
+                                                    [param] = "No Data"
+                        except KeyError as err:
+                            logging.error("{err!r}".format(err=err))
+                            if continue_on_error:
+                                continue
+                            else:
+                                return None
+            return data
+
+        except (KeyError, IndexError, ValueError) as err:
+            logging.error("Missing mandatory parameter in the element "
+                          "specification: {err!r}".format(err=err))
+            return None
+        except AttributeError as err:
+            logging.error("{err!r}".format(err=err))
+            return None
+
+
     @staticmethod
     def merge_data(data):
         """Merge data from more jobs and builds to a simple data structure.
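A note on the matching rule in the new filter_tests_by_name(): each entry of the element's "include" list is compiled as a regular expression and tested with re.match(), so the pattern is anchored at the start of the lower-cased test ID; an entry meant to match anywhere inside the ID needs a leading ".*". A small stand-alone sketch of that rule (test IDs and pattern are illustrative, not real specification entries):

    import re

    test_ids = [
        "tests.vpp.perf.ip4.2n1l-25ge2p1xxv710-ethip4-ip4base-mrr",
        "tests.vpp.perf.l2.2n1l-25ge2p1xxv710-eth-l2xcbase-mrr",
    ]
    include = [".*ethip4-ip4base.*"]

    # Mirrors the loop in the new method: compile each entry, keep the
    # test IDs for which re.match() succeeds.
    selected = [
        test_ID for test_ID in test_ids
        for test in include
        if re.match(re.compile(str(test).lower()), str(test_ID).lower())
    ]
    # selected -> the ethip4-ip4base test ID only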