X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Ftools%2Fpresentation%2Finput_data_parser.py;h=67aec2f8b1c1005f954f2ee35e2c2614326c8ecf;hp=89b8eee68fddfd077857f6310435ce682d0314c0;hb=7ece7949b9097e44ae4ce12e4dd8b95da5dfbc49;hpb=40ece2a4354f2dcc4af2928ab45762ccc22625e7

diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py
index 89b8eee68f..67aec2f8b1 100644
--- a/resources/tools/presentation/input_data_parser.py
+++ b/resources/tools/presentation/input_data_parser.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2022 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -263,8 +263,7 @@ class ExecutionChecker(ResultVisitor):
     )
 
     REGEX_VERSION_VPP = re.compile(
-        r"(return STDOUT Version:\s*|"
-        r"VPP Version:\s*|VPP version:\s*)(.*)"
+        r"(VPP Version:\s*|VPP version:\s*)(.*)"
     )
     REGEX_VERSION_DPDK = re.compile(
         r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
@@ -297,7 +296,7 @@ class ExecutionChecker(ResultVisitor):
         r'hostname=\"(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\",hook=\"(.*)\"'
     )
 
-    def __init__(self, metadata, mapping, ignore, for_output):
+    def __init__(self, metadata, mapping, ignore, process_oper):
         """Initialisation.
 
         :param metadata: Key-value pairs to be included in "metadata" part of
@@ -305,11 +304,12 @@ class ExecutionChecker(ResultVisitor):
         :param mapping: Mapping of the old names of test cases to the new
             (actual) one.
         :param ignore: List of TCs to be ignored.
-        :param for_output: Output to be generated from downloaded data.
+        :param process_oper: If True, operational data (show run, telemetry) is
+            processed.
         :type metadata: dict
         :type mapping: dict
         :type ignore: list
-        :type for_output: str
+        :type process_oper: bool
         """
 
         # Type of message to parse out from the test messages
@@ -330,7 +330,7 @@ class ExecutionChecker(ResultVisitor):
         # Ignore list
         self._ignore = ignore
 
-        self._for_output = for_output
+        self._process_oper = process_oper
 
         # Number of PAPI History messages found:
         # 0 - no message
@@ -591,14 +591,14 @@ class ExecutionChecker(ResultVisitor):
 
         :returns: Nothing.
         """
-        if msg.message.count(u"return STDOUT Version:") or \
-            msg.message.count(u"VPP Version:") or \
-            msg.message.count(u"VPP version:"):
+        if msg.message.count(u"VPP version:") or \
+            msg.message.count(u"VPP Version:"):
             self._version = str(
                 re.search(self.REGEX_VERSION_VPP, msg.message).group(2)
             )
             self._data[u"metadata"][u"version"] = self._version
             self._msg_type = None
+            logging.info(self._version)
 
     def _get_dpdk_version(self, msg):
         """Called when extraction of DPDK version is required.
@@ -1042,7 +1042,7 @@ class ExecutionChecker(ResultVisitor):
             u"level": len(suite.longname.split(u"."))
         }
 
-        suite.keywords.visit(self)
+        suite.setup.visit(self)
 
     def end_suite(self, suite):
         """Called when suite ends.
@@ -1060,7 +1060,7 @@ class ExecutionChecker(ResultVisitor):
         :returns: Nothing.
         """
         if self.start_test(test) is not False:
-            test.keywords.visit(self)
+            test.body.visit(self)
             self.end_test(test)
 
     def start_test(self, test):
@@ -1201,20 +1201,15 @@ class ExecutionChecker(ResultVisitor):
             if test.status == u"PASS":
                 test_result[u"throughput"], test_result[u"status"] = \
                     self._get_plr_throughput(test.message)
+        elif u"LDP_NGINX" in tags:
+            test_result[u"type"] = u"LDP_NGINX"
+            test_result[u"result"], test_result[u"status"] = \
+                self._get_vsap_data(test.message, tags)
         elif u"HOSTSTACK" in tags:
             test_result[u"type"] = u"HOSTSTACK"
             if test.status == u"PASS":
                 test_result[u"result"], test_result[u"status"] = \
                     self._get_hoststack_data(test.message, tags)
-        elif u"LDP_NGINX" in tags:
-            test_result[u"type"] = u"LDP_NGINX"
-            test_result[u"result"], test_result[u"status"] = \
-                self._get_vsap_data(test.message, tags)
-        # elif u"TCP" in tags:  # This might be not used
-        #     test_result[u"type"] = u"TCP"
-        #     if test.status == u"PASS":
-        #         groups = re.search(self.REGEX_TCP, test.message)
-        #         test_result[u"result"] = int(groups.group(2))
         elif u"RECONF" in tags:
             test_result[u"type"] = u"RECONF"
             if test.status == u"PASS":
@@ -1284,7 +1279,7 @@ class ExecutionChecker(ResultVisitor):
         :type test_kw: Keyword
         :returns: Nothing.
         """
-        for keyword in test_kw.keywords:
+        for keyword in test_kw.body:
            if self.start_test_kw(keyword) is not False:
                 self.visit_test_kw(keyword)
                 self.end_test_kw(keyword)
@@ -1297,7 +1292,7 @@ class ExecutionChecker(ResultVisitor):
         :type test_kw: Keyword
         :returns: Nothing.
         """
-        if self._for_output == u"trending":
+        if not self._process_oper:
             return
 
         if test_kw.name.count(u"Run Telemetry On All Duts"):
@@ -1326,7 +1321,7 @@ class ExecutionChecker(ResultVisitor):
         :type setup_kw: Keyword
         :returns: Nothing.
         """
-        for keyword in setup_kw.keywords:
+        for keyword in setup_kw.setup:
             if self.start_setup_kw(keyword) is not False:
                 self.visit_setup_kw(keyword)
                 self.end_setup_kw(keyword)
@@ -1367,7 +1362,7 @@ class ExecutionChecker(ResultVisitor):
         :type teardown_kw: Keyword
         :returns: Nothing.
         """
-        for keyword in teardown_kw.keywords:
+        for keyword in teardown_kw.body:
             if self.start_teardown_kw(keyword) is not False:
                 self.visit_teardown_kw(keyword)
                 self.end_teardown_kw(keyword)
@@ -1454,7 +1449,7 @@ class InputData:
         self._for_output = for_output
 
         # Data store:
-        self._input_data = pd.Series()
+        self._input_data = pd.Series(dtype="float64")
 
     @property
     def data(self):
@@ -1526,17 +1521,27 @@ class InputData:
                 f"Error occurred while parsing output.xml: {repr(err)}"
             )
             return None
+
+        process_oper = False
+        if u"-vpp-perf-report-coverage-" in job:
+            process_oper = True
+        # elif u"-vpp-perf-report-iterative-" in job:
+        #     # Exceptions for TBs where we do not have coverage data:
+        #     for item in (u"-2n-icx", ):
+        #         if item in job:
+        #             process_oper = True
+        #             break
         checker = ExecutionChecker(
-            metadata, self._cfg.mapping, self._cfg.ignore, self._for_output
+            metadata, self._cfg.mapping, self._cfg.ignore, process_oper
         )
         result.visit(checker)
 
         checker.data[u"metadata"][u"tests_total"] = \
-            result.statistics.total.all.total
+            result.statistics.total.total
         checker.data[u"metadata"][u"tests_passed"] = \
-            result.statistics.total.all.passed
+            result.statistics.total.passed
         checker.data[u"metadata"][u"tests_failed"] = \
-            result.statistics.total.all.failed
+            result.statistics.total.failed
         checker.data[u"metadata"][u"elapsedtime"] = result.suite.elapsedtime
         checker.data[u"metadata"][u"generated"] = result.suite.endtime[:14]
 
@@ -1659,7 +1664,7 @@ class InputData:
                     })
 
                     if self._input_data.get(job, None) is None:
-                        self._input_data[job] = pd.Series()
+                        self._input_data[job] = pd.Series(dtype="float64")
                     self._input_data[job][str(build_nr)] = build_data
                     self._cfg.set_input_file_name(
                         job, build_nr, result[u"build"][u"file-name"]
@@ -1738,7 +1743,7 @@ class InputData:
                 })
 
                 if self._input_data.get(job, None) is None:
-                    self._input_data[job] = pd.Series()
+                    self._input_data[job] = pd.Series(dtype="float64")
                 self._input_data[job][str(build_nr)] = build_data
 
                 self._cfg.set_input_state(job, build_nr, u"processed")
@@ -1895,12 +1900,12 @@ class InputData:
             params.extend((u"type", u"status"))
 
         data_to_filter = data if data else element[u"data"]
-        data = pd.Series()
+        data = pd.Series(dtype="float64")
         try:
             for job, builds in data_to_filter.items():
-                data[job] = pd.Series()
+                data[job] = pd.Series(dtype="float64")
                 for build in builds:
-                    data[job][str(build)] = pd.Series()
+                    data[job][str(build)] = pd.Series(dtype="float64")
                     try:
                         data_dict = dict(
                             self.data[job][str(build)][data_set].items())
@@ -1911,7 +1916,8 @@ class InputData:
 
                     for test_id, test_data in data_dict.items():
                         if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
-                            data[job][str(build)][test_id] = pd.Series()
+                            data[job][str(build)][test_id] = \
+                                pd.Series(dtype="float64")
                             if params is None:
                                 for param, val in test_data.items():
                                     data[job][str(build)][test_id][param] = val
@@ -1995,12 +2001,12 @@ class InputData:
         else:
             tests = include
 
-        data = pd.Series()
+        data = pd.Series(dtype="float64")
         try:
             for job, builds in element[u"data"].items():
-                data[job] = pd.Series()
+                data[job] = pd.Series(dtype="float64")
                 for build in builds:
-                    data[job][str(build)] = pd.Series()
+                    data[job][str(build)] = pd.Series(dtype="float64")
                     for test in tests:
                         try:
                             reg_ex = re.compile(str(test).lower())
@@ -2009,7 +2015,8 @@ class InputData:
                                 if re.match(reg_ex, str(test_id).lower()):
                                     test_data = self.data[job][
                                         str(build)][data_set][test_id]
-                                    data[job][str(build)][test_id] = pd.Series()
+                                    data[job][str(build)][test_id] = \
+                                        pd.Series(dtype="float64")
                                     if params is None:
                                         for param, val in test_data.items():
                                             data[job][str(build)][test_id]\
@@ -2064,7 +2071,7 @@ class InputData:
 
         logging.info(u"    Merging data ...")
 
-        merged_data = pd.Series()
+        merged_data = pd.Series(dtype="float64")
         for builds in data.values:
             for item in builds.values:
                 for item_id, item_data in item.items():
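The first functional hunk narrows REGEX_VERSION_VPP by dropping the obsolete "return STDOUT Version:" alternative, and _get_vpp_version is updated to match. A standalone check of the new pattern; the sample message string is invented, the real text comes from output.xml:

import re

# Narrowed pattern from the diff: only the two "VPP [Vv]ersion:" variants remain.
REGEX_VERSION_VPP = re.compile(
    r"(VPP Version:\s*|VPP version:\s*)(.*)"
)

msg = u"VPP version: 22.02-release"  # invented sample
match = re.search(REGEX_VERSION_VPP, msg)
if match:
    print(match.group(2))  # -> 22.02-release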
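Several hunks (visit_suite, visit_test, visit_test_kw, visit_setup_kw, visit_teardown_kw) migrate from the pre-4.0 Robot Framework result model, where child items hung off a `keywords` attribute, to the 4.0+ model (`suite.setup`, `test.body`, `kw.body`). A minimal visitor sketch using the new attributes, assuming Robot Framework 4+ and a local output.xml (the path is an assumption):

from robot.api import ExecutionResult, ResultVisitor

class BodyWalker(ResultVisitor):
    """Walks each test via the RF 4+ `body` attribute; the old
    `keywords` attribute this diff replaces was deprecated in RF 4.0
    and removed later."""

    def visit_test(self, test):
        for item in test.body:  # keywords plus control structures
            print(test.name, u"->", type(item).__name__)

result = ExecutionResult(u"output.xml")  # assumed path
result.visit(BodyWalker())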
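In the same spirit, ExecutionChecker.__init__ swaps the `for_output` string for a `process_oper` boolean, visit_test_kw now skips operational data (show run, telemetry) whenever that flag is False, and the caller derives the flag from the job name so that only coverage jobs keep operational data. A condensed, hypothetical restatement of that gating; the helper name and sample job names are illustrative, not from the source:

def compute_process_oper(job):
    # Mirrors the logic added before the ExecutionChecker call:
    # operational data is processed only for coverage jobs; the
    # iterative-job exception list stays commented out in the source.
    return u"-vpp-perf-report-coverage-" in job

print(compute_process_oper(u"csit-vpp-perf-report-coverage-2202-2n-icx"))   # True
print(compute_process_oper(u"csit-vpp-perf-report-iterative-2202-2n-icx"))  # False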
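The Robot Framework 4.0 migration also removed the criticality split from statistics, which is why `result.statistics.total.all.total` collapses to `result.statistics.total.total` (likewise for passed and failed). Under the same RF 4+ assumption:

from robot.api import ExecutionResult

result = ExecutionResult(u"output.xml")  # assumed path
stats = result.statistics.total          # TotalStatistics; no `.all` level in RF 4+
print(stats.total, stats.passed, stats.failed)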
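Finally, every bare pd.Series() gains an explicit dtype="float64": on pandas >= 1.0, constructing an empty Series without a dtype emits a DeprecationWarning ("The default dtype for empty Series will be 'object' instead of 'float64' in a future version"), and naming the dtype pins the current behaviour. A sketch of the nested store the parser builds; the job key is hypothetical:

import pandas as pd

# Explicit dtype silences the pandas >= 1.0 empty-Series warning.
store = pd.Series(dtype="float64")
store[u"job-name"] = pd.Series(dtype="float64")  # the parser nests one Series
                                                 # per job and per build
print(store)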