diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py
index 670cb3248a..7e2abe6dbf 100644
--- a/resources/tools/presentation/input_data_parser.py
+++ b/resources/tools/presentation/input_data_parser.py
@@ -19,6 +19,7 @@
 - filter the data using tags,
 """
 
+import copy
 import re
 import resource
 import pandas as pd
@@ -30,7 +31,6 @@ from robot import errors
 from collections import OrderedDict
 from string import replace
 from os import remove
-from os.path import join
 from datetime import datetime as dt
 from datetime import timedelta
 from json import loads
@@ -98,24 +98,28 @@ class ExecutionChecker(ResultVisitor):
                         "direction1": {
                             "min": float,
                             "avg": float,
-                            "max": float
+                            "max": float,
+                            "hdrh": str
                         },
                         "direction2": {
                             "min": float,
                             "avg": float,
-                            "max": float
+                            "max": float,
+                            "hdrh": str
                         }
                     },
                     "PDR": {
                         "direction1": {
                             "min": float,
                             "avg": float,
-                            "max": float
+                            "max": float,
+                            "hdrh": str
                         },
                         "direction2": {
                             "min": float,
                             "avg": float,
-                            "max": float
+                            "max": float,
+                            "hdrh": str
                         }
                     }
                 }
@@ -147,60 +151,6 @@ class ExecutionChecker(ResultVisitor):
                 }
             }
 
-            # TODO: Remove when definitely no NDRPDRDISC tests are used:
-            # NDRPDRDISC tests:
-            "ID": {
-                "name": "Test name",
-                "parent": "Name of the parent of the test",
-                "doc": "Test documentation",
-                "msg": "Test message",
-                "tags": ["tag 1", "tag 2", "tag n"],
-                "type": "PDR" | "NDR",
-                "status": "PASS" | "FAIL",
-                "throughput": {  # Only type: "PDR" | "NDR"
-                    "value": int,
-                    "unit": "pps" | "bps" | "percentage"
-                },
-                "latency": {  # Only type: "PDR" | "NDR"
-                    "direction1": {
-                        "100": {
-                            "min": int,
-                            "avg": int,
-                            "max": int
-                        },
-                        "50": {  # Only for NDR
-                            "min": int,
-                            "avg": int,
-                            "max": int
-                        },
-                        "10": {  # Only for NDR
-                            "min": int,
-                            "avg": int,
-                            "max": int
-                        }
-                    },
-                    "direction2": {
-                        "100": {
-                            "min": int,
-                            "avg": int,
-                            "max": int
-                        },
-                        "50": {  # Only for NDR
-                            "min": int,
-                            "avg": int,
-                            "max": int
-                        },
-                        "10": {  # Only for NDR
-                            "min": int,
-                            "avg": int,
-                            "max": int
-                        }
-                    }
-                },
-                "lossTolerance": "lossTolerance",  # Only type: "PDR"
-                "conf-history": "DUT1 and DUT2 VAT History"
-                "show-run": "Show Run"
-            },
             "ID" {
                 # next test
             }
@@ -259,19 +209,6 @@ class ExecutionChecker(ResultVisitor):
                                          r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                          r'PDR_UPPER:\s(\d+.\d+)')
 
-    # TODO: Remove when definitely no NDRPDRDISC tests are used:
-    REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
-                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
-                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
-                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
-                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
-                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
-                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')
-
-    REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
-                               r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
-                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')
-
     REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                   r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')
 
@@ -555,53 +492,6 @@ class ExecutionChecker(ResultVisitor):
         except KeyError:
             pass
 
-    # TODO: Remove when definitely no NDRPDRDISC tests are used:
-    def _get_latency(self, msg, test_type):
-        """Get the latency data from the test message.
-
-        :param msg: Message to be parsed.
-        :param test_type: Type of the test - NDR or PDR.
-        :type msg: str
-        :type test_type: str
-        :returns: Latencies parsed from the message.
-        :rtype: dict
-        """
-
-        if test_type == "NDR":
-            groups = re.search(self.REGEX_LAT_NDR, msg)
-            groups_range = range(1, 7)
-        elif test_type == "PDR":
-            groups = re.search(self.REGEX_LAT_PDR, msg)
-            groups_range = range(1, 3)
-        else:
-            return {}
-
-        latencies = list()
-        for idx in groups_range:
-            try:
-                lat = [int(item) for item in str(groups.group(idx)).split('/')]
-            except (AttributeError, ValueError):
-                lat = [-1, -1, -1]
-            latencies.append(lat)
-
-        keys = ("min", "avg", "max")
-        latency = {
-            "direction1": {
-            },
-            "direction2": {
-            }
-        }
-
-        latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
-        latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
-        if test_type == "NDR":
-            latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
-            latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
-            latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
-            latency["direction2"]["10"] = dict(zip(keys, latencies[5]))
-
-        return latency
-
     def _get_ndrpdr_throughput(self, msg):
         """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
         message.
@@ -666,31 +556,52 @@ class ExecutionChecker(ResultVisitor):
         :returns: Parsed data as a dict and the status (PASS/FAIL).
         :rtype: tuple(dict, str)
         """
-
+        latency_default = {"min": -1.0, "avg": -1.0, "max": -1.0, "hdrh": ""}
         latency = {
             "NDR": {
-                "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
-                "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
+                "direction1": copy.copy(latency_default),
+                "direction2": copy.copy(latency_default)
             },
             "PDR": {
-                "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
-                "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
+                "direction1": copy.copy(latency_default),
+                "direction2": copy.copy(latency_default)
             }
         }
         status = "FAIL"
         groups = re.search(self.REGEX_NDRPDR_LAT, msg)
 
+        def process_latency(in_str):
+            """Return object with parsed latency values.
+
+            TODO: Define class for the return type.
+
+            :param in_str: Input string, min/avg/max/hdrh format.
+            :type in_str: str
+            :returns: Dict with min, avg, max (floats) and hdrh (str) values.
+            :rtype: dict
+            :raises IndexError: If in_str does not have enough substrings.
+            :raises ValueError: If a substring does not convert to float.
+ """ + in_list = in_str.split('/') + + rval = { + "min": float(in_list[0]), + "avg": float(in_list[1]), + "max": float(in_list[2]), + "hdrh": "" + } + + if len(in_list) == 4: + rval["hdrh"] = str(in_list[3]) + + return rval + if groups is not None: - keys = ("min", "avg", "max") try: - latency["NDR"]["direction1"] = dict( - zip(keys, [float(l) for l in groups.group(1).split('/')])) - latency["NDR"]["direction2"] = dict( - zip(keys, [float(l) for l in groups.group(2).split('/')])) - latency["PDR"]["direction1"] = dict( - zip(keys, [float(l) for l in groups.group(3).split('/')])) - latency["PDR"]["direction2"] = dict( - zip(keys, [float(l) for l in groups.group(4).split('/')])) + latency["NDR"]["direction1"] = process_latency(groups.group(1)) + latency["NDR"]["direction2"] = process_latency(groups.group(2)) + latency["PDR"]["direction1"] = process_latency(groups.group(3)) + latency["PDR"]["direction2"] = process_latency(groups.group(4)) status = "PASS" except (IndexError, ValueError): pass @@ -1312,23 +1223,6 @@ class InputData(object): ("INFO", " The build {job}/{build} is outdated, will be " "removed".format(job=job, build=build["build"]))) - file_name = self._cfg.input["file-name"] - full_name = join( - self._cfg.environment["paths"]["DIR[WORKING,DATA]"], - "{job}{sep}{build}{sep}{name}".format( - job=job, - sep=SEPARATOR, - build=build["build"], - name=file_name)) - try: - remove(full_name) - logs.append(("INFO", - " The file {name} has been removed". - format(name=full_name))) - except OSError as err: - logs.append(("ERROR", - "Cannot remove the file '{0}': {1}". - format(full_name, repr(err)))) logs.append(("INFO", " Done.")) for level, line in logs: