X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=resources%2Ftools%2Fpresentation%2Finput_data_parser.py;h=00c2380fdc8d17388944ea45a731374184b725db;hb=ec79cd31fd974372fd6cd02be6a504dff6d50e8e;hp=94f8e96ec839bd6ea7ad9fcbd29b5129adef1bb5;hpb=36048e330c53b0325fb120da0f7661ad2dd44611;p=csit.git diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py index 94f8e96ec8..00c2380fdc 100644 --- a/resources/tools/presentation/input_data_parser.py +++ b/resources/tools/presentation/input_data_parser.py @@ -295,7 +295,11 @@ class ExecutionChecker(ResultVisitor): REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)') - def __init__(self, metadata, mapping, ignore): + REGEX_SH_RUN_HOST = re.compile( + r'hostname=\"(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\",hook=\"(.*)\"' + ) + + def __init__(self, metadata, mapping, ignore, for_output): """Initialisation. :param metadata: Key-value pairs to be included in "metadata" part of @@ -303,9 +307,11 @@ class ExecutionChecker(ResultVisitor): :param mapping: Mapping of the old names of test cases to the new (actual) one. :param ignore: List of TCs to be ignored. + :param for_output: Output to be generated from downloaded data. :type metadata: dict :type mapping: dict :type ignore: list + :type for_output: str """ # Type of message to parse out from the test messages @@ -326,6 +332,8 @@ class ExecutionChecker(ResultVisitor): # Ignore list self._ignore = ignore + self._for_output = for_output + # Number of PAPI History messages found: # 0 - no message # 1 - PAPI History of DUT1 @@ -333,6 +341,8 @@ class ExecutionChecker(ResultVisitor): self._conf_history_lookup_nr = 0 self._sh_run_counter = 0 + self._telemetry_kw_counter = 0 + self._telemetry_msg_counter = 0 # Test ID of currently processed test- the lowercase full path to the # test @@ -357,7 +367,8 @@ class ExecutionChecker(ResultVisitor): u"dpdk-version": self._get_dpdk_version, u"teardown-papi-history": self._get_papi_history, u"test-show-runtime": self._get_show_run, - u"testbed": self._get_testbed + u"testbed": self._get_testbed, + u"test-telemetry": self._get_telemetry } @property @@ -669,10 +680,6 @@ class ExecutionChecker(ResultVisitor): except (AttributeError, IndexError): sock = u"" - runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u''). - replace(u"'", u'"').replace(u'b"', u'"'). - replace(u'u"', u'"').split(u":", 1)[1]) - dut = u"dut{nr}".format( nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1) @@ -681,7 +688,80 @@ class ExecutionChecker(ResultVisitor): { u"host": host, u"socket": sock, - u"runtime": runtime, + u"runtime": str(msg.message).replace(u' ', u''). + replace(u'\n', u'').replace(u"'", u'"'). + replace(u'b"', u'"').replace(u'u"', u'"'). + split(u":", 1)[1] + } + ) + + def _get_telemetry(self, msg): + """Called when extraction of VPP telemetry data is required. + + :param msg: Message to process. + :type msg: Message + :returns: Nothing. 
+ """ + + if self._telemetry_kw_counter > 1: + return + if not msg.message.count(u"# TYPE vpp_runtime_calls"): + return + + if u"telemetry-show-run" not in \ + self._data[u"tests"][self._test_id].keys(): + self._data[u"tests"][self._test_id][u"telemetry-show-run"] = dict() + + self._telemetry_msg_counter += 1 + groups = re.search(self.REGEX_SH_RUN_HOST, msg.message) + if not groups: + return + try: + host = groups.group(1) + except (AttributeError, IndexError): + host = u"" + try: + sock = groups.group(2) + except (AttributeError, IndexError): + sock = u"" + runtime = { + u"source_type": u"node", + u"source_id": host, + u"msg_type": u"metric", + u"log_level": u"INFO", + u"timestamp": msg.timestamp, + u"msg": u"show_runtime", + u"host": host, + u"socket": sock, + u"data": list() + } + for line in msg.message.splitlines(): + if not line.startswith(u"vpp_runtime_"): + continue + try: + params, value, timestamp = line.rsplit(u" ", maxsplit=2) + cut = params.index(u"{") + name = params[:cut].split(u"_", maxsplit=2)[-1] + labels = eval( + u"dict" + params[cut:].replace('{', '(').replace('}', ')') + ) + labels[u"graph_node"] = labels.pop(u"name") + runtime[u"data"].append( + { + u"name": name, + u"value": value, + u"timestamp": timestamp, + u"labels": labels + } + ) + except (TypeError, ValueError, IndexError): + continue + self._data[u'tests'][self._test_id][u'telemetry-show-run']\ + [f"dut{self._telemetry_msg_counter}"] = copy.copy( + { + u"host": host, + u"socket": sock, + u"runtime": runtime } ) @@ -1000,6 +1080,8 @@ class ExecutionChecker(ResultVisitor): """ self._sh_run_counter = 0 + self._telemetry_kw_counter = 0 + self._telemetry_msg_counter = 0 longname_orig = test.longname.lower() @@ -1225,9 +1307,13 @@ class ExecutionChecker(ResultVisitor): :type test_kw: Keyword :returns: Nothing. """ - if test_kw.name.count(u"Show Runtime On All Duts") or \ - test_kw.name.count(u"Show Runtime Counters On All Duts") or \ - test_kw.name.count(u"Vpp Show Runtime On All Duts"): + if self._for_output == u"trending": + return + + if test_kw.name.count(u"Run Telemetry On All Duts"): + self._msg_type = u"test-telemetry" + self._telemetry_kw_counter += 1 + elif test_kw.name.count(u"Show Runtime On All Duts"): self._msg_type = u"test-show-runtime" self._sh_run_counter += 1 else: @@ -1366,16 +1452,20 @@ class InputData: (as described in ExecutionChecker documentation) """ - def __init__(self, spec): + def __init__(self, spec, for_output): """Initialization. :param spec: Specification. + :param for_output: Output to be generated from downloaded data. 
:type spec: Specification + :type for_output: str """ # Specification: self._cfg = spec + self._for_output = for_output + # Data store: self._input_data = pd.Series() @@ -1450,7 +1540,7 @@ class InputData: ) return None checker = ExecutionChecker( - metadata, self._cfg.mapping, self._cfg.ignore + metadata, self._cfg.mapping, self._cfg.ignore, self._for_output ) result.visit(checker) @@ -1998,13 +2088,14 @@ class InputData: for dut_name, data in test_data[u"show-run"].items(): if data.get(u"runtime", None) is None: continue + runtime = loads(data[u"runtime"]) try: - threads_nr = len(data[u"runtime"][0][u"clocks"]) + threads_nr = len(runtime[0][u"clocks"]) except (IndexError, KeyError): continue threads = OrderedDict( {idx: list() for idx in range(threads_nr)}) - for item in data[u"runtime"]: + for item in runtime: for idx in range(threads_nr): if item[u"vectors"][idx] > 0: clocks = item[u"clocks"][idx] / \ @@ -2025,8 +2116,8 @@ class InputData: vectors_call = 0.0 if int(item[u"calls"][idx]) + int( - item[u"vectors"][idx]) + \ - int(item[u"suspends"][idx]): + item[u"vectors"][idx]) + \ + int(item[u"suspends"][idx]): threads[idx].append([ item[u"name"], item[u"calls"][idx],
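
Note on the show-run change: _get_show_run() no longer parses the runtime data at
collection time; it now stores the normalised JSON-like string under u"runtime",
and the summary code in InputData (last hunks above) rebuilds the structure with
loads() only when the tables are generated. A minimal sketch of that round trip,
using a made-up message payload (the real one is produced by the CSIT
show-runtime keyword), could look like this:

    from json import loads

    # Invented example of roughly what msg.message contains.
    message = (
        "Runtime: [{'name': u'ip4-lookup', 'calls': [7243, 0], "
        "'vectors': [115888, 0], 'suspends': [0, 0], 'clocks': [120.0, 0.0]}]"
    )

    # What the patched _get_show_run() stores under u"runtime": a string.
    raw = str(message).replace(u' ', u'').replace(u'\n', u'').replace(
        u"'", u'"').replace(u'b"', u'"').replace(u'u"', u'"').split(u":", 1)[1]

    # What the summary code does later; see the loads(data[u"runtime"]) hunk.
    runtime = loads(raw)
    threads_nr = len(runtime[0][u"clocks"])
    print(threads_nr)  # 2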
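
Similarly, a sketch of what the new _get_telemetry() extracts from one VPP
telemetry message: the host/socket pair via REGEX_SH_RUN_HOST, and the metric
name, labels, value and timestamp from each vpp_runtime_* exposition line. The
sample message and most label names below are invented for illustration (the
patch itself only requires a "name" label), and the label parsing here uses a
regex instead of the patch's eval() call:

    import re

    # Same pattern as REGEX_SH_RUN_HOST in the patch.
    REGEX_SH_RUN_HOST = re.compile(
        r'hostname=\"(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\",hook=\"(.*)\"'
    )

    # Invented sample; a real message comes from "Run Telemetry On All Duts".
    MSG = (
        'hostname="10.30.51.50",hook="/run/vpp/api.sock"\n'
        '# TYPE vpp_runtime_calls gauge\n'
        'vpp_runtime_calls{name="ip4-lookup",state="active",'
        'thread_name="vpp_wk_0",thread_id="1"} 7243.0 1629889801.123\n'
    )

    groups = re.search(REGEX_SH_RUN_HOST, MSG)
    host = groups.group(1)   # "10.30.51.50"
    sock = groups.group(2)   # "/run/vpp/api.sock"

    data = list()
    for line in MSG.splitlines():
        if not line.startswith(u"vpp_runtime_"):
            continue
        params, value, timestamp = line.rsplit(u" ", maxsplit=2)
        cut = params.index(u"{")
        name = params[:cut].split(u"_", maxsplit=2)[-1]      # u"calls"
        # key="value" label pairs; the patch builds the same dict via eval().
        labels = dict(re.findall(r'(\w+)="([^"]*)"', params[cut:]))
        labels[u"graph_node"] = labels.pop(u"name")          # u"ip4-lookup"
        data.append(
            {u"name": name, u"value": value, u"timestamp": timestamp,
             u"labels": labels}
        )

    print(host, sock, data[0][u"labels"][u"graph_node"])
    # 10.30.51.50 /run/vpp/api.sock ip4-lookup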