-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
import logging
from collections import OrderedDict
-from os import remove
+from os import remove, walk, listdir
+from os.path import isfile, isdir, join
from datetime import datetime as dt
from datetime import timedelta
from json import loads
+from json.decoder import JSONDecodeError
import hdrh.histogram
import hdrh.codec
from resources.libraries.python import jumpavg
from input_data_files import download_and_unzip_data_file
+from pal_errors import PresentationError
# Separator used in file names
-SEPARATOR = u"__"
+SEPARATOR = "__"
class ExecutionChecker(ResultVisitor):
"""Class to traverse through the test suite structure.
-
- The functionality implemented in this class generates a json structure:
-
- Performance tests:
-
- {
- "metadata": {
- "generated": "Timestamp",
- "version": "SUT version",
- "job": "Jenkins job name",
- "build": "Information about the build"
- },
- "suites": {
- "Suite long name 1": {
- "name": Suite name,
- "doc": "Suite 1 documentation",
- "parent": "Suite 1 parent",
- "level": "Level of the suite in the suite hierarchy"
- }
- "Suite long name N": {
- "name": Suite name,
- "doc": "Suite N documentation",
- "parent": "Suite 2 parent",
- "level": "Level of the suite in the suite hierarchy"
- }
- }
- "tests": {
- # NDRPDR tests:
- "ID": {
- "name": "Test name",
- "parent": "Name of the parent of the test",
- "doc": "Test documentation",
- "msg": "Test message",
- "conf-history": "DUT1 and DUT2 VAT History",
- "show-run": "Show Run",
- "tags": ["tag 1", "tag 2", "tag n"],
- "type": "NDRPDR",
- "status": "PASS" | "FAIL",
- "throughput": {
- "NDR": {
- "LOWER": float,
- "UPPER": float
- },
- "PDR": {
- "LOWER": float,
- "UPPER": float
- }
- },
- "latency": {
- "NDR": {
- "direction1": {
- "min": float,
- "avg": float,
- "max": float,
- "hdrh": str
- },
- "direction2": {
- "min": float,
- "avg": float,
- "max": float,
- "hdrh": str
- }
- },
- "PDR": {
- "direction1": {
- "min": float,
- "avg": float,
- "max": float,
- "hdrh": str
- },
- "direction2": {
- "min": float,
- "avg": float,
- "max": float,
- "hdrh": str
- }
- }
- }
- }
-
- # TCP tests:
- "ID": {
- "name": "Test name",
- "parent": "Name of the parent of the test",
- "doc": "Test documentation",
- "msg": "Test message",
- "tags": ["tag 1", "tag 2", "tag n"],
- "type": "TCP",
- "status": "PASS" | "FAIL",
- "result": int
- }
-
- # MRR, BMRR tests:
- "ID": {
- "name": "Test name",
- "parent": "Name of the parent of the test",
- "doc": "Test documentation",
- "msg": "Test message",
- "tags": ["tag 1", "tag 2", "tag n"],
- "type": "MRR" | "BMRR",
- "status": "PASS" | "FAIL",
- "result": {
- "receive-rate": float,
- # Average of a list, computed using AvgStdevStats.
- # In CSIT-1180, replace with List[float].
- }
- }
-
- "ID" {
- # next test
- }
- }
- }
-
-
- Functional tests:
-
- {
- "metadata": { # Optional
- "version": "VPP version",
- "job": "Jenkins job name",
- "build": "Information about the build"
- },
- "suites": {
- "Suite name 1": {
- "doc": "Suite 1 documentation",
- "parent": "Suite 1 parent",
- "level": "Level of the suite in the suite hierarchy"
- }
- "Suite name N": {
- "doc": "Suite N documentation",
- "parent": "Suite 2 parent",
- "level": "Level of the suite in the suite hierarchy"
- }
- }
- "tests": {
- "ID": {
- "name": "Test name",
- "parent": "Name of the parent of the test",
- "doc": "Test documentation"
- "msg": "Test message"
- "tags": ["tag 1", "tag 2", "tag n"],
- "conf-history": "DUT1 and DUT2 VAT History"
- "show-run": "Show Run"
- "status": "PASS" | "FAIL"
- },
- "ID" {
- # next test
- }
- }
- }
-
- .. note:: ID is the lowercase full path to the test.
"""
REGEX_PLR_RATE = re.compile(
r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
r'PDR_UPPER:\s(\d+.\d+)'
)
+ REGEX_NDRPDR_GBPS = re.compile(
+ r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
+ r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
+ r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
+ r'PDR_UPPER:.*,\s(\d+.\d+)'
+ )
REGEX_PERF_MSG_INFO = re.compile(
- r'NDR_LOWER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*).*\n'
- r'LATENCY.*\[\'(.*)\', \'(.*)\'\].*\n'
- r'NDR_UPPER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*).*\n'
- r'PDR_LOWER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*).*\n'
- r'LATENCY.*\[\'(.*)\', \'(.*)\'\].*\n'
- r'PDR_UPPER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*)'
+ r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
+ r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
+ r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
+ r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
+ r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
+ )
+ REGEX_CPS_MSG_INFO = re.compile(
+ r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
+ r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
)
- # TODO: Remove when not needed
+ REGEX_PPS_MSG_INFO = re.compile(
+ r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
+ r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
+ )
+ REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')
+
+ REGEX_VSAP_MSG_INFO = re.compile(
+ r'Transfer Rate: (\d*.\d*).*\n'
+ r'Latency: (\d*.\d*).*\n'
+        r'Connection [cr]ps rate: (\d*).*\n'
+ r'Total data transferred: (\d*).*\n'
+ r'Completed requests: (\d*).*\n'
+ r'Failed requests:\s*(\d*.\d*)'
+ )
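+    # Illustrative shape of a message the VSAP pattern above targets
+    # (field values hypothetical):
+    #   Transfer Rate: 123.45 ...
+    #   Latency: 1.23 ...
+    #   Connection cps rate: 1000 ...
+    #   Total data transferred: 1000000 ...
+    #   Completed requests: 100 ...
+    #   Failed requests: 0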
+
+ # Needed for CPS and PPS tests
REGEX_NDRPDR_LAT_BASE = re.compile(
r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
)
REGEX_NDRPDR_LAT = re.compile(
r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
r'Latency.*\[\'(.*)\', \'(.*)\'\]'
)
- # TODO: Remove when not needed
- REGEX_NDRPDR_LAT_LONG = re.compile(
- r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
- r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
- r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
- r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
- r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
- r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
- r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
- r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
- r'Latency.*\[\'(.*)\', \'(.*)\'\]'
- )
+
REGEX_VERSION_VPP = re.compile(
- r"(return STDOUT Version:\s*|"
- r"VPP Version:\s*|VPP version:\s*)(.*)"
+ r"(VPP Version:\s*|VPP version:\s*)(.*)"
)
REGEX_VERSION_DPDK = re.compile(
r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
)
REGEX_MRR = re.compile(
r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
r'tx\s(\d*),\srx\s(\d*)'
)
REGEX_BMRR = re.compile(
- r'Maximum Receive Rate trial results'
- r' in packets per second: \[(.*)\]'
+ r'.*trial results.*: \[(.*)\]'
)
REGEX_RECONF_LOSS = re.compile(
r'Packets lost due to reconfig: (\d*)'
)
REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
- REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
-
REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
- def __init__(self, metadata, mapping, ignore):
+ REGEX_SH_RUN_HOST = re.compile(
+ r'hostname=\"(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\",hook=\"(.*)\"'
+ )
+
+ def __init__(self, metadata, mapping, ignore, process_oper):
"""Initialisation.
:param metadata: Key-value pairs to be included in "metadata" part of
:param mapping: Mapping of the old names of test cases to the new
(actual) one.
:param ignore: List of TCs to be ignored.
+ :param process_oper: If True, operational data (show run, telemetry) is
+ processed.
:type metadata: dict
:type mapping: dict
:type ignore: list
+ :type process_oper: bool
"""
- # Type of message to parse out from the test messages
- self._msg_type = None
+ # Mapping of TCs long names
+ self._mapping = mapping
+
+ # Ignore list
+ self._ignore = ignore
+
+ # Process operational data
+ self._process_oper = process_oper
+
+ # Name of currently processed keyword
+ self._kw_name = None
# VPP version
self._version = None
# Testbed. The testbed is identified by TG node IP address.
self._testbed = None
- # Mapping of TCs long names
- self._mapping = mapping
-
- # Ignore list
- self._ignore = ignore
-
# Number of PAPI History messages found:
# 0 - no message
# 1 - PAPI History of DUT1
self._conf_history_lookup_nr = 0
self._sh_run_counter = 0
+ self._telemetry_kw_counter = 0
+ self._telemetry_msg_counter = 0
# Test ID of currently processed test - the lowercase full path to the
# test
# The main data structure
self._data = {
- u"metadata": OrderedDict(),
- u"suites": OrderedDict(),
- u"tests": OrderedDict()
+ "metadata": dict(),
+ "suites": dict(),
+ "tests": dict()
}
# Save the provided metadata
for key, val in metadata.items():
- self._data[u"metadata"][key] = val
-
- # Dictionary defining the methods used to parse different types of
- # messages
- self.parse_msg = {
- u"timestamp": self._get_timestamp,
- u"vpp-version": self._get_vpp_version,
- u"dpdk-version": self._get_dpdk_version,
- # TODO: Remove when not needed:
- u"teardown-vat-history": self._get_vat_history,
- u"teardown-papi-history": self._get_papi_history,
- u"test-show-runtime": self._get_show_run,
- u"testbed": self._get_testbed
- }
+ self._data["metadata"][key] = val
@property
def data(self):
"""
return self._data
- def _get_data_from_perf_test_msg(self, msg):
- """Get
- - NDR_LOWER
- - LATENCY
- - NDR_UPPER
- - PDR_LOWER
- - LATENCY
- - PDR_UPPER
- from message of NDRPDR performance tests.
+ def _get_data_from_mrr_test_msg(self, msg):
+ """Get info from message of MRR performance tests.
:param msg: Message to be processed.
:type msg: str
:rtype: str
"""
+ groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
+ if not groups or groups.lastindex != 1:
+ return "Test Failed."
+
+ try:
+ data = groups.group(1).split(", ")
+ except (AttributeError, IndexError, ValueError, KeyError):
+ return "Test Failed."
+
+ out_str = "["
+ try:
+ for item in data:
+ out_str += f"{(float(item) / 1e6):.2f}, "
+ return out_str[:-2] + "]"
+ except (AttributeError, IndexError, ValueError, KeyError):
+ return "Test Failed."
+
+ def _get_data_from_cps_test_msg(self, msg):
+ """Get info from message of NDRPDR CPS tests.
+
+ :param msg: Message to be processed.
+ :type msg: str
+ :returns: Processed message or "Test Failed." if a problem occurs.
+ :rtype: str
+ """
+
+ groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
+ if not groups or groups.lastindex != 2:
+ return "Test Failed."
+
+ try:
+ return (
+ f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
+ f"2. {(float(groups.group(2)) / 1e6):5.2f}"
+ )
+ except (AttributeError, IndexError, ValueError, KeyError):
+ return "Test Failed."
+
+ def _get_data_from_pps_test_msg(self, msg):
+ """Get info from message of NDRPDR PPS tests.
+
+ :param msg: Message to be processed.
+ :type msg: str
+ :returns: Processed message or "Test Failed." if a problem occurs.
+ :rtype: str
+ """
+
+ groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
+ if not groups or groups.lastindex != 4:
+ return "Test Failed."
+
+ try:
+ return (
+ f"1. {(float(groups.group(1)) / 1e6):5.2f} "
+ f"{float(groups.group(2)):5.2f}\n"
+ f"2. {(float(groups.group(3)) / 1e6):5.2f} "
+ f"{float(groups.group(4)):5.2f}"
+ )
+ except (AttributeError, IndexError, ValueError, KeyError):
+ return "Test Failed."
+
+ def _get_data_from_perf_test_msg(self, msg):
+ """Get info from message of NDRPDR performance tests.
+
+ :param msg: Message to be processed.
+ :type msg: str
+ :returns: Processed message or "Test Failed." if a problem occurs.
+ :rtype: str
+ """
+
groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
- if not groups or groups.lastindex != 20:
- return msg
+ if not groups or groups.lastindex != 10:
+ return "Test Failed."
try:
data = {
- u"ndr_low": float(groups.group(1)),
- u"ndr_low_unit": groups.group(2),
- u"ndr_low_b": float(groups.group(3)),
- u"ndr_low_b_unit": groups.group(4),
- u"ndr_lat_1": groups.group(5),
- u"ndr_lat_2": groups.group(6),
- u"ndr_up": float(groups.group(7)),
- u"ndr_up_unit": groups.group(8),
- u"ndr_up_b": float(groups.group(9)),
- u"ndr_up_b_unit": groups.group(10),
- u"pdr_low": float(groups.group(11)),
- u"pdr_low_unit": groups.group(12),
- u"pdr_low_b": float(groups.group(13)),
- u"pdr_low_b_unit": groups.group(14),
- u"pdr_lat_1": groups.group(15),
- u"pdr_lat_2": groups.group(16),
- u"pdr_up": float(groups.group(17)),
- u"pdr_up_unit": groups.group(18),
- u"pdr_up_b": float(groups.group(19)),
- u"pdr_up_b_unit": groups.group(20)
+ "ndr_low": float(groups.group(1)),
+ "ndr_low_b": float(groups.group(2)),
+ "pdr_low": float(groups.group(3)),
+ "pdr_low_b": float(groups.group(4)),
+ "pdr_lat_90_1": groups.group(5),
+ "pdr_lat_90_2": groups.group(6),
+ "pdr_lat_50_1": groups.group(7),
+ "pdr_lat_50_2": groups.group(8),
+ "pdr_lat_10_1": groups.group(9),
+ "pdr_lat_10_2": groups.group(10),
}
except (AttributeError, IndexError, ValueError, KeyError):
- return msg
+ return "Test Failed."
def _process_lat(in_str_1, in_str_2):
- """Extract min, avg, max values from latency string.
+ """Extract P50, P90 and P99 latencies or min, avg, max values from
+ latency string.
:param in_str_1: Latency string for one direction produced by robot
framework.
robot framework.
:type in_str_1: str
:type in_str_2: str
- :returns: Processed latency string or original string if a problem
- occurs.
- :rtype: tuple(str, str)
+            :returns: Tuple with six latency values, or None if a problem
+                occurs.
+ :rtype: tuple
"""
in_list_1 = in_str_1.split('/', 3)
- if len(in_list_1) < 3:
- return u"Not Measured.", u"Not Measured."
-
in_list_2 = in_str_2.split('/', 3)
- if len(in_list_2) < 3:
- return u"Not Measured.", u"Not Measured."
- hdr_lat_1 = u""
- if len(in_list_1) == 4:
- in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
- try:
- hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
- except hdrh.codec.HdrLengthException:
- pass
- hdr_lat_2 = u""
- if len(in_list_2) == 4:
- in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
- try:
- hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
- except hdrh.codec.HdrLengthException:
- pass
+            if len(in_list_1) != 4 or len(in_list_2) != 4:
+ return None
+
+ in_list_1[3] += "=" * (len(in_list_1[3]) % 4)
+ try:
+ hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
+ except hdrh.codec.HdrLengthException:
+ hdr_lat_1 = None
+
+ in_list_2[3] += "=" * (len(in_list_2[3]) % 4)
+ try:
+ hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
+ except hdrh.codec.HdrLengthException:
+ hdr_lat_2 = None
- hdr_lat = u"Not Measured."
if hdr_lat_1 and hdr_lat_2:
hdr_lat = (
- f"50%/90%/99%/99.9%, "
- f"{hdr_lat_1.get_value_at_percentile(50.0)}/"
- f"{hdr_lat_1.get_value_at_percentile(90.0)}/"
- f"{hdr_lat_1.get_value_at_percentile(99.0)}/"
- f"{hdr_lat_1.get_value_at_percentile(99.9)}, "
- f"{hdr_lat_2.get_value_at_percentile(50.0)}/"
- f"{hdr_lat_2.get_value_at_percentile(90.0)}/"
- f"{hdr_lat_2.get_value_at_percentile(99.0)}/"
- f"{hdr_lat_2.get_value_at_percentile(99.9)} "
- f"uSec."
+ hdr_lat_1.get_value_at_percentile(50.0),
+ hdr_lat_1.get_value_at_percentile(90.0),
+ hdr_lat_1.get_value_at_percentile(99.0),
+ hdr_lat_2.get_value_at_percentile(50.0),
+ hdr_lat_2.get_value_at_percentile(90.0),
+ hdr_lat_2.get_value_at_percentile(99.0)
)
+ if all(hdr_lat):
+ return hdr_lat
- return (
- f"Min/Avg/Max, "
- f"{in_list_1[0]}/{in_list_1[1]}/{in_list_1[2]}, "
- f"{in_list_2[0]}/{in_list_2[1]}/{in_list_2[2]} uSec.",
- hdr_lat
+ hdr_lat = (
+ int(in_list_1[0]), int(in_list_1[1]), int(in_list_1[2]),
+ int(in_list_2[0]), int(in_list_2[1]), int(in_list_2[2])
)
+ for item in hdr_lat:
+ if item in (-1, 4294967295, 0):
+ return None
+ return hdr_lat
try:
- pdr_lat = _process_lat(data[u'pdr_lat_1'], data[u'pdr_lat_2'])
- ndr_lat = _process_lat(data[u'ndr_lat_1'], data[u'ndr_lat_2'])
- return (
- f"NDR Throughput: {(data[u'ndr_low'] / 1e6):.2f} "
- f"M{data[u'ndr_low_unit']}, "
- f"{data[u'ndr_low_b']:.2f} {data[u'ndr_low_b_unit']}.\n"
- f"One-Way Latency at NDR: {ndr_lat[0]}\n"
- f"One-Way Latency at NDR by percentiles: {ndr_lat[1]}\n"
- f"PDR Throughput: {(data[u'pdr_low'] / 1e6):.2f} "
- f"M{data[u'pdr_low_unit']}, "
- f"{data[u'pdr_low_b']:.2f} {data[u'pdr_low_b_unit']}.\n"
- f"One-Way Latency at PDR: {pdr_lat[0]}\n"
- f"One-Way Latency at PDR by percentiles: {pdr_lat[1]}"
+ out_msg = (
+ f"1. {(data['ndr_low'] / 1e6):5.2f} "
+ f"{data['ndr_low_b']:5.2f}"
+ f"\n2. {(data['pdr_low'] / 1e6):5.2f} "
+ f"{data['pdr_low_b']:5.2f}"
)
+ latency = (
+ _process_lat(data['pdr_lat_10_1'], data['pdr_lat_10_2']),
+ _process_lat(data['pdr_lat_50_1'], data['pdr_lat_50_2']),
+ _process_lat(data['pdr_lat_90_1'], data['pdr_lat_90_2'])
+ )
+ if all(latency):
+ max_len = len(str(max((max(item) for item in latency))))
+ max_len = 4 if max_len < 4 else max_len
+
+ for idx, lat in enumerate(latency):
+ if not idx:
+ out_msg += "\n"
+ out_msg += (
+ f"\n{idx + 3}. "
+ f"{lat[0]:{max_len}d} "
+ f"{lat[1]:{max_len}d} "
+ f"{lat[2]:{max_len}d} "
+ f"{lat[3]:{max_len}d} "
+ f"{lat[4]:{max_len}d} "
+ f"{lat[5]:{max_len}d} "
+ )
+
+ return out_msg
+
except (AttributeError, IndexError, ValueError, KeyError):
- return msg
+ return "Test Failed."
def _get_testbed(self, msg):
"""Called when extraction of testbed IP is required.
:returns: Nothing.
"""
- if msg.message.count(u"Setup of TG node") or \
- msg.message.count(u"Setup of node TG host"):
+ if msg.message.count("Setup of TG node") or \
+ msg.message.count("Setup of node TG host"):
reg_tg_ip = re.compile(
r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
try:
except (KeyError, ValueError, IndexError, AttributeError):
pass
finally:
- self._data[u"metadata"][u"testbed"] = self._testbed
- self._msg_type = None
+ self._data["metadata"]["testbed"] = self._testbed
def _get_vpp_version(self, msg):
"""Called when extraction of VPP version is required.
:returns: Nothing.
"""
- if msg.message.count(u"return STDOUT Version:") or \
- msg.message.count(u"VPP Version:") or \
- msg.message.count(u"VPP version:"):
- self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
- group(2))
- self._data[u"metadata"][u"version"] = self._version
- self._msg_type = None
+ if msg.message.count("VPP version:") or \
+ msg.message.count("VPP Version:"):
+ self._version = str(
+ re.search(self.REGEX_VERSION_VPP, msg.message).group(2)
+ )
+ self._data["metadata"]["version"] = self._version
def _get_dpdk_version(self, msg):
"""Called when extraction of DPDK version is required.
:returns: Nothing.
"""
- if msg.message.count(u"DPDK Version:"):
+ if msg.message.count("DPDK Version:"):
try:
self._version = str(re.search(
self.REGEX_VERSION_DPDK, msg.message).group(2))
- self._data[u"metadata"][u"version"] = self._version
+ self._data["metadata"]["version"] = self._version
except IndexError:
pass
- finally:
- self._msg_type = None
-
- def _get_timestamp(self, msg):
- """Called when extraction of timestamp is required.
-
- :param msg: Message to process.
- :type msg: Message
- :returns: Nothing.
- """
-
- self._timestamp = msg.timestamp[:14]
- self._data[u"metadata"][u"generated"] = self._timestamp
- self._msg_type = None
-
- def _get_vat_history(self, msg):
- """Called when extraction of VAT command history is required.
-
- TODO: Remove when not needed.
-
- :param msg: Message to process.
- :type msg: Message
- :returns: Nothing.
- """
- if msg.message.count(u"VAT command history:"):
- self._conf_history_lookup_nr += 1
- if self._conf_history_lookup_nr == 1:
- self._data[u"tests"][self._test_id][u"conf-history"] = str()
- else:
- self._msg_type = None
- text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
- r"VAT command history:", u"",
- msg.message, count=1).replace(u'\n', u' |br| ').\
- replace(u'"', u"'")
-
- self._data[u"tests"][self._test_id][u"conf-history"] += (
- f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
- )
def _get_papi_history(self, msg):
"""Called when extraction of PAPI command history is required.
:type msg: Message
:returns: Nothing.
"""
- if msg.message.count(u"PAPI command history:"):
+ if msg.message.count("PAPI command history:"):
self._conf_history_lookup_nr += 1
if self._conf_history_lookup_nr == 1:
- self._data[u"tests"][self._test_id][u"conf-history"] = str()
- else:
- self._msg_type = None
- text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
- r"PAPI command history:", u"",
- msg.message, count=1).replace(u'\n', u' |br| ').\
- replace(u'"', u"'")
- self._data[u"tests"][self._test_id][u"conf-history"] += (
- f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
- )
+ self._data["tests"][self._test_id]["conf-history"] = str()
+ text = re.sub(
+ r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} PAPI command history:",
+ "",
+ msg.message,
+ count=1
+ ).replace('"', "'")
+ self._data["tests"][self._test_id]["conf-history"] += \
+ f"**DUT{str(self._conf_history_lookup_nr)}:** {text}"
def _get_show_run(self, msg):
"""Called when extraction of VPP operational data (output of CLI command
:returns: Nothing.
"""
- if not msg.message.count(u"stats runtime"):
+ if not msg.message.count("stats runtime"):
return
# Temporary solution
if self._sh_run_counter > 1:
return
- if u"show-run" not in self._data[u"tests"][self._test_id].keys():
- self._data[u"tests"][self._test_id][u"show-run"] = dict()
+ if "show-run" not in self._data["tests"][self._test_id].keys():
+ self._data["tests"][self._test_id]["show-run"] = dict()
groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
if not groups:
try:
host = groups.group(1)
except (AttributeError, IndexError):
- host = u""
+ host = ""
try:
sock = groups.group(2)
except (AttributeError, IndexError):
- sock = u""
+ sock = ""
+
+ dut = "dut{nr}".format(
+ nr=len(self._data['tests'][self._test_id]['show-run'].keys()) + 1)
+
+ self._data['tests'][self._test_id]['show-run'][dut] = \
+ copy.copy(
+ {
+ "host": host,
+ "socket": sock,
+ "runtime": str(msg.message).replace(' ', '').
+ replace('\n', '').replace("'", '"').
+ replace('b"', '"').replace('"', '"').
+ split(":", 1)[1]
+ }
+ )
- runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
- replace(u"'", u'"').replace(u'b"', u'"').
- replace(u'u"', u'"').split(u":", 1)[1])
+ def _get_telemetry(self, msg):
+ """Called when extraction of VPP telemetry data is required.
- try:
- threads_nr = len(runtime[0][u"clocks"])
- except (IndexError, KeyError):
+ :param msg: Message to process.
+ :type msg: Message
+ :returns: Nothing.
+ """
+
+ if self._telemetry_kw_counter > 1:
+ return
+ if not msg.message.count("# TYPE vpp_runtime_calls"):
return
- dut = u"DUT{nr}".format(
- nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
+ if "telemetry-show-run" not in \
+ self._data["tests"][self._test_id].keys():
+ self._data["tests"][self._test_id]["telemetry-show-run"] = dict()
- oper = {
- u"host": host,
- u"socket": sock,
- u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
+ self._telemetry_msg_counter += 1
+ groups = re.search(self.REGEX_SH_RUN_HOST, msg.message)
+ if not groups:
+ return
+ try:
+ host = groups.group(1)
+ except (AttributeError, IndexError):
+ host = ""
+ try:
+ sock = groups.group(2)
+ except (AttributeError, IndexError):
+ sock = ""
+ runtime = {
+ "source_type": "node",
+ "source_id": host,
+ "msg_type": "metric",
+ "log_level": "INFO",
+ "timestamp": msg.timestamp,
+ "msg": "show_runtime",
+ "host": host,
+ "socket": sock,
+ "data": list()
}
-
- for item in runtime:
- for idx in range(threads_nr):
- if item[u"vectors"][idx] > 0:
- clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
- elif item[u"calls"][idx] > 0:
- clocks = item[u"clocks"][idx] / item[u"calls"][idx]
- elif item[u"suspends"][idx] > 0:
- clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
- else:
- clocks = 0.0
-
- if item[u"calls"][idx] > 0:
- vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
- else:
- vectors_call = 0.0
-
- if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
- int(item[u"suspends"][idx]):
- oper[u"threads"][idx].append([
- item[u"name"],
- item[u"calls"][idx],
- item[u"vectors"][idx],
- item[u"suspends"][idx],
- clocks,
- vectors_call
- ])
-
- self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)
+ for line in msg.message.splitlines():
+ if not line.startswith("vpp_runtime_"):
+ continue
+ try:
+ params, value, timestamp = line.rsplit(" ", maxsplit=2)
+ cut = params.index("{")
+ name = params[:cut].split("_", maxsplit=2)[-1]
+ labels = eval(
+ "dict" + params[cut:].replace('{', '(').replace('}', ')')
+ )
+ labels["graph_node"] = labels.pop("name")
+ runtime["data"].append(
+ {
+ "name": name,
+ "value": value,
+ "timestamp": timestamp,
+ "labels": labels
+ }
+ )
+ except (TypeError, ValueError, IndexError):
+ continue
+ self._data['tests'][self._test_id]['telemetry-show-run']\
+ [f"dut{self._telemetry_msg_counter}"] = copy.copy(
+ {
+ "host": host,
+ "socket": sock,
+ "runtime": runtime
+ }
+ )
def _get_ndrpdr_throughput(self, msg):
"""Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
"""
throughput = {
- u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
- u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
+ "NDR": {"LOWER": -1.0, "UPPER": -1.0},
+ "PDR": {"LOWER": -1.0, "UPPER": -1.0}
}
- status = u"FAIL"
+ status = "FAIL"
groups = re.search(self.REGEX_NDRPDR_RATE, msg)
if groups is not None:
try:
- throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
- throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
- throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
- throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
- status = u"PASS"
+ throughput["NDR"]["LOWER"] = float(groups.group(1))
+ throughput["NDR"]["UPPER"] = float(groups.group(2))
+ throughput["PDR"]["LOWER"] = float(groups.group(3))
+ throughput["PDR"]["UPPER"] = float(groups.group(4))
+ status = "PASS"
except (IndexError, ValueError):
pass
return throughput, status
+ def _get_ndrpdr_throughput_gbps(self, msg):
+ """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
+ test message.
+
+ :param msg: The test message to be parsed.
+ :type msg: str
+ :returns: Parsed data as a dict and the status (PASS/FAIL).
+ :rtype: tuple(dict, str)
+ """
+
+ gbps = {
+ "NDR": {"LOWER": -1.0, "UPPER": -1.0},
+ "PDR": {"LOWER": -1.0, "UPPER": -1.0}
+ }
+ status = "FAIL"
+ groups = re.search(self.REGEX_NDRPDR_GBPS, msg)
+
+ if groups is not None:
+ try:
+ gbps["NDR"]["LOWER"] = float(groups.group(1))
+ gbps["NDR"]["UPPER"] = float(groups.group(2))
+ gbps["PDR"]["LOWER"] = float(groups.group(3))
+ gbps["PDR"]["UPPER"] = float(groups.group(4))
+ status = "PASS"
+ except (IndexError, ValueError):
+ pass
+
+ return gbps, status
+
def _get_plr_throughput(self, msg):
"""Get PLRsearch lower bound and PLRsearch upper bound from the test
message.
"""
throughput = {
- u"LOWER": -1.0,
- u"UPPER": -1.0
+ "LOWER": -1.0,
+ "UPPER": -1.0
}
- status = u"FAIL"
+ status = "FAIL"
groups = re.search(self.REGEX_PLR_RATE, msg)
if groups is not None:
try:
- throughput[u"LOWER"] = float(groups.group(1))
- throughput[u"UPPER"] = float(groups.group(2))
- status = u"PASS"
+ throughput["LOWER"] = float(groups.group(1))
+ throughput["UPPER"] = float(groups.group(2))
+ status = "PASS"
except (IndexError, ValueError):
pass
:rtype: tuple(dict, str)
"""
latency_default = {
- u"min": -1.0,
- u"avg": -1.0,
- u"max": -1.0,
- u"hdrh": u""
+ "min": -1.0,
+ "avg": -1.0,
+ "max": -1.0,
+ "hdrh": ""
}
latency = {
- u"NDR": {
- u"direction1": copy.copy(latency_default),
- u"direction2": copy.copy(latency_default)
+ "NDR": {
+ "direction1": copy.copy(latency_default),
+ "direction2": copy.copy(latency_default)
},
- u"PDR": {
- u"direction1": copy.copy(latency_default),
- u"direction2": copy.copy(latency_default)
+ "PDR": {
+ "direction1": copy.copy(latency_default),
+ "direction2": copy.copy(latency_default)
},
- u"LAT0": {
- u"direction1": copy.copy(latency_default),
- u"direction2": copy.copy(latency_default)
+ "LAT0": {
+ "direction1": copy.copy(latency_default),
+ "direction2": copy.copy(latency_default)
},
- u"PDR10": {
- u"direction1": copy.copy(latency_default),
- u"direction2": copy.copy(latency_default)
+ "PDR10": {
+ "direction1": copy.copy(latency_default),
+ "direction2": copy.copy(latency_default)
},
- u"PDR50": {
- u"direction1": copy.copy(latency_default),
- u"direction2": copy.copy(latency_default)
+ "PDR50": {
+ "direction1": copy.copy(latency_default),
+ "direction2": copy.copy(latency_default)
},
- u"PDR90": {
- u"direction1": copy.copy(latency_default),
- u"direction2": copy.copy(latency_default)
+ "PDR90": {
+ "direction1": copy.copy(latency_default),
+ "direction2": copy.copy(latency_default)
},
}
- # TODO: Rewrite when long and base are not needed
- groups = re.search(self.REGEX_NDRPDR_LAT_LONG, msg)
- if groups is None:
- groups = re.search(self.REGEX_NDRPDR_LAT, msg)
+ groups = re.search(self.REGEX_NDRPDR_LAT, msg)
if groups is None:
groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
if groups is None:
- return latency, u"FAIL"
+ return latency, "FAIL"
def process_latency(in_str):
"""Return object with parsed latency values.
in_list = in_str.split('/', 3)
rval = {
- u"min": float(in_list[0]),
- u"avg": float(in_list[1]),
- u"max": float(in_list[2]),
- u"hdrh": u""
+ "min": float(in_list[0]),
+ "avg": float(in_list[1]),
+ "max": float(in_list[2]),
+ "hdrh": ""
}
if len(in_list) == 4:
- rval[u"hdrh"] = str(in_list[3])
+ rval["hdrh"] = str(in_list[3])
return rval
try:
- latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
- latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
- latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
- latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
+ latency["NDR"]["direction1"] = process_latency(groups.group(1))
+ latency["NDR"]["direction2"] = process_latency(groups.group(2))
+ latency["PDR"]["direction1"] = process_latency(groups.group(3))
+ latency["PDR"]["direction2"] = process_latency(groups.group(4))
if groups.lastindex == 4:
- return latency, u"PASS"
+ return latency, "PASS"
except (IndexError, ValueError):
pass
try:
- latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
- latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
- latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
- latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
- latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
- latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
- latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
- latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
+ latency["PDR90"]["direction1"] = process_latency(groups.group(5))
+ latency["PDR90"]["direction2"] = process_latency(groups.group(6))
+ latency["PDR50"]["direction1"] = process_latency(groups.group(7))
+ latency["PDR50"]["direction2"] = process_latency(groups.group(8))
+ latency["PDR10"]["direction1"] = process_latency(groups.group(9))
+ latency["PDR10"]["direction2"] = process_latency(groups.group(10))
+ latency["LAT0"]["direction1"] = process_latency(groups.group(11))
+ latency["LAT0"]["direction2"] = process_latency(groups.group(12))
if groups.lastindex == 12:
- return latency, u"PASS"
+ return latency, "PASS"
except (IndexError, ValueError):
pass
- # TODO: Remove when not needed
- latency[u"NDR10"] = {
- u"direction1": copy.copy(latency_default),
- u"direction2": copy.copy(latency_default)
- }
- latency[u"NDR50"] = {
- u"direction1": copy.copy(latency_default),
- u"direction2": copy.copy(latency_default)
- }
- latency[u"NDR90"] = {
- u"direction1": copy.copy(latency_default),
- u"direction2": copy.copy(latency_default)
- }
- try:
- latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
- latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
- latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
- latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
- latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
- latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
- latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
- latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
- latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
- latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
- latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
- latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
- latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
- latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
- return latency, u"PASS"
- except (IndexError, ValueError):
- pass
+ return latency, "FAIL"
+
+ @staticmethod
+ def _get_hoststack_data(msg, tags):
+ """Get data from the hoststack test message.
+
+ :param msg: The test message to be parsed.
+ :param tags: Test tags.
+ :type msg: str
+ :type tags: list
+ :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
+ :rtype: tuple(dict, str)
+ """
+ result = dict()
+ status = "FAIL"
+
+ msg = msg.replace("'", '"').replace(" ", "")
+ if "LDPRELOAD" in tags:
+ try:
+ result = loads(msg)
+ status = "PASS"
+ except JSONDecodeError:
+ pass
+ elif "VPPECHO" in tags:
+ try:
+ msg_lst = msg.replace("}{", "} {").split(" ")
+ result = dict(
+ client=loads(msg_lst[0]),
+ server=loads(msg_lst[1])
+ )
+ status = "PASS"
+ except (JSONDecodeError, IndexError):
+ pass
+
+ return result, status
+
+ def _get_vsap_data(self, msg, tags):
+ """Get data from the vsap test message.
+
+ :param msg: The test message to be parsed.
+ :param tags: Test tags.
+ :type msg: str
+ :type tags: list
+ :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
+ :rtype: tuple(dict, str)
+ """
+ result = dict()
+ status = "FAIL"
- return latency, u"FAIL"
+ groups = re.search(self.REGEX_VSAP_MSG_INFO, msg)
+ if groups is not None:
+ try:
+ result["transfer-rate"] = float(groups.group(1)) * 1e3
+ result["latency"] = float(groups.group(2))
+ result["completed-requests"] = int(groups.group(5))
+ result["failed-requests"] = int(groups.group(6))
+ result["bytes-transferred"] = int(groups.group(4))
+ if "TCP_CPS" in tags:
+ result["cps"] = float(groups.group(3))
+ elif "TCP_RPS" in tags:
+ result["rps"] = float(groups.group(3))
+ else:
+ return result, status
+ status = "PASS"
+ except (IndexError, ValueError) as err:
+ logging.warning(err)
+ return result, status
def visit_suite(self, suite):
"""Implements traversing through the suite and its direct children.
:returns: Nothing.
"""
if self.start_suite(suite) is not False:
+ suite.setup.visit(self)
suite.suites.visit(self)
suite.tests.visit(self)
+ suite.teardown.visit(self)
self.end_suite(suite)
def start_suite(self, suite):
:type suite: Suite
:returns: Nothing.
"""
-
try:
parent_name = suite.parent.name
except AttributeError:
return
- doc_str = suite.doc.\
- replace(u'"', u"'").\
- replace(u'\n', u' ').\
- replace(u'\r', u'').\
- replace(u'*[', u' |br| *[').\
- replace(u"*", u"**").\
- replace(u' |br| *[', u'*[', 1)
-
- self._data[u"suites"][suite.longname.lower().
- replace(u'"', u"'").
- replace(u" ", u"_")] = {
- u"name": suite.name.lower(),
- u"doc": doc_str,
- u"parent": parent_name,
- u"level": len(suite.longname.split(u"."))
- }
-
- suite.keywords.visit(self)
-
- def end_suite(self, suite):
- """Called when suite ends.
-
- :param suite: Suite to process.
- :type suite: Suite
- :returns: Nothing.
- """
+ self._data["suites"][suite.longname.lower().replace('"', "'").\
+ replace(" ", "_")] = {
+ "name": suite.name.lower(),
+ "doc": suite.doc,
+ "parent": parent_name,
+ "level": len(suite.longname.split("."))
+ }
def visit_test(self, test):
"""Implements traversing through the test.
:returns: Nothing.
"""
if self.start_test(test) is not False:
- test.keywords.visit(self)
+ test.setup.visit(self)
+ test.body.visit(self)
+ test.teardown.visit(self)
self.end_test(test)
def start_test(self, test):
"""
self._sh_run_counter = 0
+ self._telemetry_kw_counter = 0
+ self._telemetry_msg_counter = 0
longname_orig = test.longname.lower()
# Change the TC long name and name if defined in the mapping table
longname = self._mapping.get(longname_orig, None)
if longname is not None:
- name = longname.split(u'.')[-1]
- logging.debug(
- f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
- f"{name}"
- )
+ name = longname.split('.')[-1]
else:
longname = longname_orig
name = test.name.lower()
# Remove TC number from the TC long name (backward compatibility):
- self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
+ self._test_id = re.sub(self.REGEX_TC_NUMBER, "", longname)
# Remove TC number from the TC name (not needed):
- test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
-
- test_result[u"parent"] = test.parent.name.lower()
- test_result[u"tags"] = tags
- test_result["doc"] = test.doc.\
- replace(u'"', u"'").\
- replace(u'\n', u' ').\
- replace(u'\r', u'').\
- replace(u'[', u' |br| [').\
- replace(u' |br| [', u'[', 1)
- test_result[u"msg"] = self._get_data_from_perf_test_msg(test.message).\
- replace(u'\n', u' |br| ').\
- replace(u'\r', u'').\
- replace(u'"', u"'")
- test_result[u"type"] = u"FUNC"
- test_result[u"status"] = test.status
-
- if u"PERFTEST" in tags:
+ test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
+
+ test_result["parent"] = test.parent.name.lower()
+ test_result["tags"] = tags
+ test_result["doc"] = test.doc
+ test_result["type"] = ""
+ test_result["status"] = test.status
+ test_result["starttime"] = test.starttime
+ test_result["endtime"] = test.endtime
+
+ if test.status == "PASS":
+ if "NDRPDR" in tags:
+ if "TCP_PPS" in tags or "UDP_PPS" in tags:
+ test_result["msg"] = self._get_data_from_pps_test_msg(
+ test.message)
+ elif "TCP_CPS" in tags or "UDP_CPS" in tags:
+ test_result["msg"] = self._get_data_from_cps_test_msg(
+ test.message)
+ else:
+ test_result["msg"] = self._get_data_from_perf_test_msg(
+ test.message)
+ elif "MRR" in tags or "FRMOBL" in tags or "BMRR" in tags:
+ test_result["msg"] = self._get_data_from_mrr_test_msg(
+ test.message)
+ else:
+ test_result["msg"] = test.message
+ else:
+ test_result["msg"] = test.message
+
+ if "PERFTEST" in tags and "TREX" not in tags:
# Replace info about cores (e.g. -1c-) with the info about threads
# and cores (e.g. -1t1c-) in the long test case names and in the
# test case names if necessary.
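+            # Example (illustrative): with the single tag "2T1C", the name
+            # fragment "-1c-" becomes "-2t1c-".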
- groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
- if not groups:
- tag_count = 0
- tag_tc = str()
- for tag in test_result[u"tags"]:
- groups = re.search(self.REGEX_TC_TAG, tag)
- if groups:
- tag_count += 1
- tag_tc = tag
-
- if tag_count == 1:
- self._test_id = re.sub(self.REGEX_TC_NAME_NEW,
- f"-{tag_tc.lower()}-",
- self._test_id,
- count=1)
- test_result[u"name"] = re.sub(self.REGEX_TC_NAME_NEW,
- f"-{tag_tc.lower()}-",
- test_result["name"],
- count=1)
- else:
- test_result[u"status"] = u"FAIL"
- self._data[u"tests"][self._test_id] = test_result
- logging.debug(
- f"The test {self._test_id} has no or more than one "
- f"multi-threading tags.\n"
- f"Tags: {test_result[u'tags']}"
- )
- return
+ tag_count = 0
+ tag_tc = str()
+ for tag in test_result["tags"]:
+ groups = re.search(self.REGEX_TC_TAG, tag)
+ if groups:
+ tag_count += 1
+ tag_tc = tag
+
+ if tag_count == 1:
+ self._test_id = re.sub(
+ self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
+ self._test_id, count=1
+ )
+ test_result["name"] = re.sub(
+ self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
+ test_result["name"], count=1
+ )
+ else:
+ test_result["status"] = "FAIL"
+ self._data["tests"][self._test_id] = test_result
+ logging.debug(
+ f"The test {self._test_id} has no or more than one "
+ f"multi-threading tags.\n"
+ f"Tags: {test_result['tags']}"
+ )
+ return
- if test.status == u"PASS":
- if u"NDRPDR" in tags:
- test_result[u"type"] = u"NDRPDR"
- test_result[u"throughput"], test_result[u"status"] = \
+ if "DEVICETEST" in tags:
+ test_result["type"] = "DEVICETEST"
+ elif "NDRPDR" in tags:
+ if "TCP_CPS" in tags or "UDP_CPS" in tags:
+ test_result["type"] = "CPS"
+ else:
+ test_result["type"] = "NDRPDR"
+ if test.status == "PASS":
+ test_result["throughput"], test_result["status"] = \
self._get_ndrpdr_throughput(test.message)
- test_result[u"latency"], test_result[u"status"] = \
+ test_result["gbps"], test_result["status"] = \
+ self._get_ndrpdr_throughput_gbps(test.message)
+ test_result["latency"], test_result["status"] = \
self._get_ndrpdr_latency(test.message)
- elif u"SOAK" in tags:
- test_result[u"type"] = u"SOAK"
- test_result[u"throughput"], test_result[u"status"] = \
- self._get_plr_throughput(test.message)
- elif u"TCP" in tags:
- test_result[u"type"] = u"TCP"
- groups = re.search(self.REGEX_TCP, test.message)
- test_result[u"result"] = int(groups.group(2))
- elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
- if u"MRR" in tags:
- test_result[u"type"] = u"MRR"
- else:
- test_result[u"type"] = u"BMRR"
-
- test_result[u"result"] = dict()
+ elif "MRR" in tags or "FRMOBL" in tags or "BMRR" in tags:
+ if "MRR" in tags:
+ test_result["type"] = "MRR"
+ else:
+ test_result["type"] = "BMRR"
+ if test.status == "PASS":
+ test_result["result"] = dict()
groups = re.search(self.REGEX_BMRR, test.message)
if groups is not None:
items_str = groups.group(1)
- items_float = [float(item.strip()) for item
- in items_str.split(",")]
+ items_float = [
+ float(item.strip().replace("'", ""))
+ for item in items_str.split(",")
+ ]
# Use whole list in CSIT-1180.
stats = jumpavg.AvgStdevStats.for_runs(items_float)
- test_result[u"result"][u"receive-rate"] = stats.avg
+ test_result["result"]["samples"] = items_float
+ test_result["result"]["receive-rate"] = stats.avg
+ test_result["result"]["receive-stdev"] = stats.stdev
else:
groups = re.search(self.REGEX_MRR, test.message)
- test_result[u"result"][u"receive-rate"] = \
+ test_result["result"]["receive-rate"] = \
float(groups.group(3)) / float(groups.group(1))
- elif u"RECONF" in tags:
- test_result[u"type"] = u"RECONF"
- test_result[u"result"] = None
+ elif "SOAK" in tags:
+ test_result["type"] = "SOAK"
+ if test.status == "PASS":
+ test_result["throughput"], test_result["status"] = \
+ self._get_plr_throughput(test.message)
+ elif "LDP_NGINX" in tags:
+ test_result["type"] = "LDP_NGINX"
+ test_result["result"], test_result["status"] = \
+ self._get_vsap_data(test.message, tags)
+ elif "HOSTSTACK" in tags:
+ test_result["type"] = "HOSTSTACK"
+ if test.status == "PASS":
+ test_result["result"], test_result["status"] = \
+ self._get_hoststack_data(test.message, tags)
+ elif "RECONF" in tags:
+ test_result["type"] = "RECONF"
+ if test.status == "PASS":
+ test_result["result"] = None
try:
grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
- test_result[u"result"] = {
- u"loss": int(grps_loss.group(1)),
- u"time": float(grps_time.group(1))
+ test_result["result"] = {
+ "loss": int(grps_loss.group(1)),
+ "time": float(grps_time.group(1))
}
except (AttributeError, IndexError, ValueError, TypeError):
- test_result[u"status"] = u"FAIL"
- else:
- test_result[u"status"] = u"FAIL"
- self._data[u"tests"][self._test_id] = test_result
- return
-
- self._data[u"tests"][self._test_id] = test_result
-
- def end_test(self, test):
- """Called when test ends.
+ test_result["status"] = "FAIL"
+ else:
+ test_result["status"] = "FAIL"
- :param test: Test to process.
- :type test: Test
- :returns: Nothing.
- """
+ self._data["tests"][self._test_id] = test_result
- def visit_keyword(self, keyword):
+ def visit_keyword(self, kw):
"""Implements traversing through the keyword and its child keywords.
:param kw: Keyword to process.
:type kw: Keyword
:returns: Nothing.
"""
- if self.start_keyword(keyword) is not False:
- self.end_keyword(keyword)
+ if self.start_keyword(kw) is not False:
+ if hasattr(kw, "body"):
+ kw.body.visit(self)
+ kw.teardown.visit(self)
+ self.end_keyword(kw)
def start_keyword(self, keyword):
"""Called when keyword starts. Default implementation does nothing.
:type keyword: Keyword
:returns: Nothing.
"""
- try:
- if keyword.type == u"setup":
- self.visit_setup_kw(keyword)
- elif keyword.type == u"teardown":
- self.visit_teardown_kw(keyword)
- else:
- self.visit_test_kw(keyword)
- except AttributeError:
- pass
+ self._kw_name = keyword.name
def end_keyword(self, keyword):
"""Called when keyword ends. Default implementation does nothing.
:type keyword: Keyword
:returns: Nothing.
"""
-
- def visit_test_kw(self, test_kw):
- """Implements traversing through the test keyword and its child
- keywords.
-
- :param test_kw: Keyword to process.
- :type test_kw: Keyword
- :returns: Nothing.
- """
- for keyword in test_kw.keywords:
- if self.start_test_kw(keyword) is not False:
- self.visit_test_kw(keyword)
- self.end_test_kw(keyword)
-
- def start_test_kw(self, test_kw):
- """Called when test keyword starts. Default implementation does
- nothing.
-
- :param test_kw: Keyword to process.
- :type test_kw: Keyword
- :returns: Nothing.
- """
- if test_kw.name.count(u"Show Runtime On All Duts") or \
- test_kw.name.count(u"Show Runtime Counters On All Duts"):
- self._msg_type = u"test-show-runtime"
- self._sh_run_counter += 1
- elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
- self._msg_type = u"dpdk-version"
- else:
- return
- test_kw.messages.visit(self)
-
- def end_test_kw(self, test_kw):
- """Called when keyword ends. Default implementation does nothing.
-
- :param test_kw: Keyword to process.
- :type test_kw: Keyword
- :returns: Nothing.
- """
-
- def visit_setup_kw(self, setup_kw):
- """Implements traversing through the teardown keyword and its child
- keywords.
-
- :param setup_kw: Keyword to process.
- :type setup_kw: Keyword
- :returns: Nothing.
- """
- for keyword in setup_kw.keywords:
- if self.start_setup_kw(keyword) is not False:
- self.visit_setup_kw(keyword)
- self.end_setup_kw(keyword)
-
- def start_setup_kw(self, setup_kw):
- """Called when teardown keyword starts. Default implementation does
- nothing.
-
- :param setup_kw: Keyword to process.
- :type setup_kw: Keyword
- :returns: Nothing.
- """
- if setup_kw.name.count(u"Show Vpp Version On All Duts") \
- and not self._version:
- self._msg_type = u"vpp-version"
- elif setup_kw.name.count(u"Set Global Variable") \
- and not self._timestamp:
- self._msg_type = u"timestamp"
- elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
- self._msg_type = u"testbed"
- else:
- return
- setup_kw.messages.visit(self)
-
- def end_setup_kw(self, setup_kw):
- """Called when keyword ends. Default implementation does nothing.
-
- :param setup_kw: Keyword to process.
- :type setup_kw: Keyword
- :returns: Nothing.
- """
-
- def visit_teardown_kw(self, teardown_kw):
- """Implements traversing through the teardown keyword and its child
- keywords.
-
- :param teardown_kw: Keyword to process.
- :type teardown_kw: Keyword
- :returns: Nothing.
- """
- for keyword in teardown_kw.keywords:
- if self.start_teardown_kw(keyword) is not False:
- self.visit_teardown_kw(keyword)
- self.end_teardown_kw(keyword)
-
- def start_teardown_kw(self, teardown_kw):
- """Called when teardown keyword starts
-
- :param teardown_kw: Keyword to process.
- :type teardown_kw: Keyword
- :returns: Nothing.
- """
-
- if teardown_kw.name.count(u"Show Vat History On All Duts"):
- # TODO: Remove when not needed:
- self._conf_history_lookup_nr = 0
- self._msg_type = u"teardown-vat-history"
- teardown_kw.messages.visit(self)
- elif teardown_kw.name.count(u"Show Papi History On All Duts"):
- self._conf_history_lookup_nr = 0
- self._msg_type = u"teardown-papi-history"
- teardown_kw.messages.visit(self)
-
- def end_teardown_kw(self, teardown_kw):
- """Called when keyword ends. Default implementation does nothing.
-
- :param teardown_kw: Keyword to process.
- :type teardown_kw: Keyword
- :returns: Nothing.
- """
+ _ = keyword
+ self._kw_name = None
def visit_message(self, msg):
"""Implements visiting the message.
:type msg: Message
:returns: Nothing.
"""
-
- if self._msg_type:
- self.parse_msg[self._msg_type](msg)
-
- def end_message(self, msg):
- """Called when message ends. Default implementation does nothing.
-
- :param msg: Message to process.
- :type msg: Message
- :returns: Nothing.
- """
+ if self._kw_name is None:
+ return
+ elif self._kw_name.count("Run Telemetry On All Duts"):
+ if self._process_oper:
+ self._telemetry_kw_counter += 1
+ self._get_telemetry(msg)
+ elif self._kw_name.count("Show Runtime On All Duts"):
+ if self._process_oper:
+ self._sh_run_counter += 1
+ self._get_show_run(msg)
+ elif self._kw_name.count("Show Vpp Version On All Duts"):
+ if not self._version:
+ self._get_vpp_version(msg)
+ elif self._kw_name.count("Install Dpdk Framework On All Duts"):
+ if not self._version:
+ self._get_dpdk_version(msg)
+ elif self._kw_name.count("Setup Framework"):
+ if not self._testbed:
+ self._get_testbed(msg)
+ elif self._kw_name.count("Show Papi History On All Duts"):
+ self._conf_history_lookup_nr = 0
+ self._get_papi_history(msg)
class InputData:
(as described in ExecutionChecker documentation)
"""
- def __init__(self, spec):
+ def __init__(self, spec, for_output):
"""Initialization.
:param spec: Specification.
+ :param for_output: Output to be generated from downloaded data.
:type spec: Specification
+ :type for_output: str
"""
# Specification:
self._cfg = spec
+ self._for_output = for_output
+
# Data store:
- self._input_data = pd.Series()
+ self._input_data = pd.Series(dtype="float64")
@property
def data(self):
:returns: Metadata
:rtype: pandas.Series
"""
-
- return self.data[job][build][u"metadata"]
+ return self.data[job][build]["metadata"]
def suites(self, job, build):
"""Getter - suites
:returns: Suites.
:rtype: pandas.Series
"""
-
- return self.data[job][str(build)][u"suites"]
+ return self.data[job][str(build)]["suites"]
def tests(self, job, build):
"""Getter - tests
:returns: Tests.
:rtype: pandas.Series
"""
+ return self.data[job][build]["tests"]
- return self.data[job][build][u"tests"]
-
- def _parse_tests(self, job, build, log):
+ def _parse_tests(self, job, build):
"""Process data from robot output.xml file and return JSON structured
data.
:param job: The name of job which build output data will be processed.
:param build: The build which output data will be processed.
- :param log: List of log messages.
:type job: str
:type build: dict
- :type log: list of tuples (severity, msg)
:returns: JSON data structure.
:rtype: dict
"""
metadata = {
- u"job": job,
- u"build": build
+ "job": job,
+ "build": build
}
- with open(build[u"file-name"], u'r') as data_file:
+ with open(build["file-name"], 'r') as data_file:
try:
result = ExecutionResult(data_file)
except errors.DataError as err:
- log.append(
- (u"ERROR", f"Error occurred while parsing output.xml: "
- f"{repr(err)}")
+ logging.error(
+ f"Error occurred while parsing output.xml: {repr(err)}"
)
return None
- checker = ExecutionChecker(metadata, self._cfg.mapping,
- self._cfg.ignore)
+
+ process_oper = False
+ if "-vpp-perf-report-coverage-" in job:
+ process_oper = True
+ # elif "-vpp-perf-report-iterative-" in job:
+ # # Exceptions for TBs where we do not have coverage data:
+ # for item in ("-2n-icx", ):
+ # if item in job:
+ # process_oper = True
+ # break
+ checker = ExecutionChecker(
+ metadata, self._cfg.mapping, self._cfg.ignore, process_oper
+ )
result.visit(checker)
+ checker.data["metadata"]["tests_total"] = \
+ result.statistics.total.total
+ checker.data["metadata"]["tests_passed"] = \
+ result.statistics.total.passed
+ checker.data["metadata"]["tests_failed"] = \
+ result.statistics.total.failed
+ checker.data["metadata"]["elapsedtime"] = result.suite.elapsedtime
+ checker.data["metadata"]["generated"] = result.suite.endtime[:14]
+
return checker.data
def _download_and_parse_build(self, job, build, repeat, pid=10000):
:type repeat: int
"""
- logs = list()
+ logging.info(f"Processing the job/build: {job}: {build['build']}")
- logs.append(
- (u"INFO", f" Processing the job/build: {job}: {build[u'build']}")
- )
-
- state = u"failed"
+ state = "failed"
success = False
data = None
do_repeat = repeat
while do_repeat:
- success = download_and_unzip_data_file(self._cfg, job, build, pid,
- logs)
+ success = download_and_unzip_data_file(self._cfg, job, build, pid)
if success:
break
do_repeat -= 1
if not success:
- logs.append(
- (u"ERROR",
- f"It is not possible to download the input data file from the "
- f"job {job}, build {build[u'build']}, or it is damaged. "
- f"Skipped.")
+ logging.error(
+ f"It is not possible to download the input data file from the "
+ f"job {job}, build {build['build']}, or it is damaged. "
+ f"Skipped."
)
if success:
- logs.append(
- (u"INFO",
- f" Processing data from the build {build[u'build']} ...")
- )
- data = self._parse_tests(job, build, logs)
+ logging.info(f" Processing data from build {build['build']}")
+ data = self._parse_tests(job, build)
if data is None:
- logs.append(
- (u"ERROR",
- f"Input data file from the job {job}, build "
- f"{build[u'build']} is damaged. Skipped.")
+ logging.error(
+ f"Input data file from the job {job}, build "
+ f"{build['build']} is damaged. Skipped."
)
else:
- state = u"processed"
+ state = "processed"
try:
- remove(build[u"file-name"])
+ remove(build["file-name"])
except OSError as err:
- logs.append(
- ("ERROR", f"Cannot remove the file {build[u'file-name']}: "
- f"{repr(err)}")
+ logging.error(
+ f"Cannot remove the file {build['file-name']}: {repr(err)}"
)
# If the time-period is defined in the specification file, remove all
# files which are outside the time period.
- timeperiod = self._cfg.input.get(u"time-period", None)
+ is_last = False
+ timeperiod = self._cfg.environment.get("time-period", None)
if timeperiod and data:
now = dt.utcnow()
timeperiod = timedelta(int(timeperiod))
- metadata = data.get(u"metadata", None)
+ metadata = data.get("metadata", None)
if metadata:
- generated = metadata.get(u"generated", None)
+ generated = metadata.get("generated", None)
if generated:
- generated = dt.strptime(generated, u"%Y%m%d %H:%M")
+ generated = dt.strptime(generated, "%Y%m%d %H:%M")
if (now - generated) > timeperiod:
# Remove the data and the file:
- state = u"removed"
+ state = "removed"
data = None
- logs.append(
- (u"INFO",
- f" The build {job}/{build[u'build']} is "
- f"outdated, will be removed.")
+ is_last = True
+ logging.info(
+ f" The build {job}/{build['build']} is "
+ f"outdated, will be removed."
)
- logs.append((u"INFO", u" Done."))
-
- for level, line in logs:
- if level == u"INFO":
- logging.info(line)
- elif level == u"ERROR":
- logging.error(line)
- elif level == u"DEBUG":
- logging.debug(line)
- elif level == u"CRITICAL":
- logging.critical(line)
- elif level == u"WARNING":
- logging.warning(line)
-
- return {u"data": data, u"state": state, u"job": job, u"build": build}
+ return {
+ "data": data,
+ "state": state,
+ "job": job,
+ "build": build,
+ "last": is_last
+ }
def download_and_parse_data(self, repeat=1):
"""Download the input data files, parse input data from input files and
:type repeat: int
"""
- logging.info(u"Downloading and parsing input files ...")
+ logging.info("Downloading and parsing input files ...")
- for job, builds in self._cfg.builds.items():
+ for job, builds in self._cfg.input.items():
for build in builds:
result = self._download_and_parse_build(job, build, repeat)
- build_nr = result[u"build"][u"build"]
+ if result["last"]:
+ break
+ build_nr = result["build"]["build"]
- if result[u"data"]:
- data = result[u"data"]
+ if result["data"]:
+ data = result["data"]
build_data = pd.Series({
- u"metadata": pd.Series(
- list(data[u"metadata"].values()),
- index=list(data[u"metadata"].keys())
+ "metadata": pd.Series(
+ list(data["metadata"].values()),
+ index=list(data["metadata"].keys())
),
- u"suites": pd.Series(
- list(data[u"suites"].values()),
- index=list(data[u"suites"].keys())
+ "suites": pd.Series(
+ list(data["suites"].values()),
+ index=list(data["suites"].keys())
),
- u"tests": pd.Series(
- list(data[u"tests"].values()),
- index=list(data[u"tests"].keys())
+ "tests": pd.Series(
+ list(data["tests"].values()),
+ index=list(data["tests"].keys())
)
})
if self._input_data.get(job, None) is None:
- self._input_data[job] = pd.Series()
+ self._input_data[job] = pd.Series(dtype="float64")
self._input_data[job][str(build_nr)] = build_data
-
self._cfg.set_input_file_name(
- job, build_nr, result[u"build"][u"file-name"])
-
- self._cfg.set_input_state(job, build_nr, result[u"state"])
+ job, build_nr, result["build"]["file-name"]
+ )
+ self._cfg.set_input_state(job, build_nr, result["state"])
mem_alloc = \
resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
- logging.info(u"Done.")
+ logging.info("Done.")
+
+ msg = f"Successful downloads from the sources:\n"
+ for source in self._cfg.environment["data-sources"]:
+ if source["successful-downloads"]:
+ msg += (
+ f"{source['url']}/{source['path']}/"
+ f"{source['file-name']}: "
+ f"{source['successful-downloads']}\n"
+ )
+ logging.info(msg)
+
+ def process_local_file(self, local_file, job="local", build_nr=1,
+ replace=True):
+        """Process a local XML file given as a command-line parameter.
+
+ :param local_file: The file to process.
+ :param job: Job name.
+ :param build_nr: Build number.
+        :param replace: If True, the existing information about jobs and
+            builds is replaced by the new one; otherwise the new jobs and
+            builds are added.
+ :type local_file: str
+ :type job: str
+ :type build_nr: int
+ :type replace: bool
+        :raises PresentationError: If the file does not exist or cannot be
+            parsed.
+ """
+ if not isfile(local_file):
+ raise PresentationError(f"The file {local_file} does not exist.")
+
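+        # Try to derive the build number from the file name, e.g. a
+        # hypothetical "archive/21.xml" yields build_nr 21; if that fails,
+        # keep the build_nr passed as a parameter.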
+ try:
+ build_nr = int(local_file.split("/")[-1].split(".")[0])
+ except (IndexError, ValueError):
+ pass
+
+ build = {
+ "build": build_nr,
+ "status": "failed",
+ "file-name": local_file
+ }
+ if replace:
+ self._cfg.input = dict()
+ self._cfg.add_build(job, build)
+
+ logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
+ data = self._parse_tests(job, build)
+ if data is None:
+ raise PresentationError(
+ f"Error occurred while parsing the file {local_file}"
+ )
+
+ build_data = pd.Series({
+ "metadata": pd.Series(
+ list(data["metadata"].values()),
+ index=list(data["metadata"].keys())
+ ),
+ "suites": pd.Series(
+ list(data["suites"].values()),
+ index=list(data["suites"].keys())
+ ),
+ "tests": pd.Series(
+ list(data["tests"].values()),
+ index=list(data["tests"].keys())
+ )
+ })
+
+ if self._input_data.get(job, None) is None:
+ self._input_data[job] = pd.Series(dtype="float64")
+ self._input_data[job][str(build_nr)] = build_data
+
+ self._cfg.set_input_state(job, build_nr, "processed")
+
+ def process_local_directory(self, local_dir, replace=True):
+        """Process a local directory with XML file(s). The directory is
+        processed as a 'job' and the XML files in it as builds.
+        If the given directory contains only sub-directories, these
+        sub-directories are processed as jobs and their XML files as builds
+        of the corresponding job.
+
+ :param local_dir: Local directory to process.
+        :param replace: If True, the existing information about jobs and
+            builds is replaced by the new one; otherwise the new jobs and
+            builds are added.
+ :type local_dir: str
+        :type replace: bool
+        :raises PresentationError: If the directory does not exist, is
+            empty, or contains both files and sub-directories.
+        """
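+        # Illustrative layouts (paths are hypothetical):
+        #   results/1.xml, results/2.xml
+        #       -> one job named after the directory, with builds 1 and 2
+        #   results/job-a/1.xml, results/job-b/1.xml
+        #       -> jobs "job-a" and "job-b", each with one build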
+ if not isdir(local_dir):
+ raise PresentationError(
+ f"The directory {local_dir} does not exist."
+ )
+
+ # Check if the given directory includes only files, or only directories
+ _, dirnames, filenames = next(walk(local_dir))
+
+ if filenames and not dirnames:
+ filenames.sort()
+ # local_builds:
+ # key: dir (job) name, value: list of file names (builds)
+ local_builds = {
+ local_dir: [join(local_dir, name) for name in filenames]
+ }
+
+ elif dirnames and not filenames:
+ dirnames.sort()
+ # local_builds:
+ # key: dir (job) name, value: list of file names (builds)
+ local_builds = dict()
+ for dirname in dirnames:
+ builds = [
+ join(local_dir, dirname, name)
+ for name in listdir(join(local_dir, dirname))
+ if isfile(join(local_dir, dirname, name))
+ ]
+ if builds:
+ local_builds[dirname] = sorted(builds)
+
+ elif not filenames and not dirnames:
+ raise PresentationError(f"The directory {local_dir} is empty.")
+ else:
+ raise PresentationError(
+ f"The directory {local_dir} can include only files or only "
+ f"directories, not both.\nThe directory {local_dir} includes "
+ f"file(s):\n{filenames}\nand directories:\n{dirnames}"
+ )
+
+ if replace:
+ self._cfg.input = dict()
+
+ for job, files in local_builds.items():
+ for idx, local_file in enumerate(files):
+ self.process_local_file(local_file, job, idx + 1, replace=False)
@staticmethod
- def _end_of_tag(tag_filter, start=0, closer=u"'"):
+ def _end_of_tag(tag_filter, start=0, closer="'"):
"""Return the index of character in the string which is the end of tag.
:param tag_filter: The string where the end of tag is being searched.
:returns: The index of the tag closer.
:rtype: int
"""
-
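+        # Illustrative: with the default closer "'" and tag_filter
+        # "'64B' and '2T1C'", a call with start=0 returns 4, the index of
+        # the quote closing '64B'.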
try:
idx_opener = tag_filter.index(closer, start)
return tag_filter.index(closer, idx_opener + 1)
:returns: Conditional statement which can be evaluated.
:rtype: str
"""
-
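+        # Illustrative: the filter string "'64B' and not '2T1C'" is
+        # rewritten to "'64B' in tags and not '2T1C' in tags", which can
+        # then be evaluated against a test's tags (see filter_data).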
index = 0
while True:
index = InputData._end_of_tag(tag_filter, index)
if index is None:
return tag_filter
index += 1
- tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
+ tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
- def filter_data(self, element, params=None, data=None, data_set=u"tests",
+ def filter_data(self, element, params=None, data=None, data_set="tests",
continue_on_error=False):
"""Filter required data from the given jobs and builds.
The output data structure is:
-
- job 1
- build 1
- test (or suite) 1 ID:
try:
if data_set == "suites":
- cond = u"True"
- elif element[u"filter"] in (u"all", u"template"):
- cond = u"True"
+ cond = "True"
+ elif element["filter"] in ("all", "template"):
+ cond = "True"
else:
- cond = InputData._condition(element[u"filter"])
+ cond = InputData._condition(element["filter"])
logging.debug(f" Filter: {cond}")
except KeyError:
- logging.error(u" No filter defined.")
+ logging.error(" No filter defined.")
return None
if params is None:
- params = element.get(u"parameters", None)
+ params = element.get("parameters", None)
if params:
- params.append(u"type")
+ params.extend(("type", "status"))
- data_to_filter = data if data else element[u"data"]
- data = pd.Series()
+ data_to_filter = data if data else element["data"]
+ data = pd.Series(dtype="float64")
try:
for job, builds in data_to_filter.items():
- data[job] = pd.Series()
+ data[job] = pd.Series(dtype="float64")
for build in builds:
- data[job][str(build)] = pd.Series()
+ data[job][str(build)] = pd.Series(dtype="float64")
try:
data_dict = dict(
self.data[job][str(build)][data_set].items())
return None
for test_id, test_data in data_dict.items():
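+                        # "cond" (built by _condition) is evaluated with the
+                        # test's tags as the only name visible to eval();
+                        # tests whose tags do not satisfy the filter are
+                        # skipped.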
- if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
- data[job][str(build)][test_id] = pd.Series()
+ if eval(cond, {"tags": test_data.get("tags", "")}):
+ data[job][str(build)][test_id] = \
+ pd.Series(dtype="float64")
if params is None:
for param, val in test_data.items():
data[job][str(build)][test_id][param] = val
test_data[param]
except KeyError:
data[job][str(build)][test_id][param] =\
- u"No Data"
+ "No Data"
return data
except (KeyError, IndexError, ValueError) as err:
)
return None
- def filter_tests_by_name(self, element, params=None, data_set=u"tests",
+ def filter_tests_by_name(self, element, params=None, data_set="tests",
continue_on_error=False):
"""Filter required data from the given jobs and builds.
The output data structure is:
-
- job 1
- build 1
- test (or suite) 1 ID:
        :rtype: pandas.Series
"""
- include = element.get(u"include", None)
+ include = element.get("include", None)
if not include:
- logging.warning(u"No tests to include, skipping the element.")
+ logging.warning("No tests to include, skipping the element.")
return None
if params is None:
- params = element.get(u"parameters", None)
- if params:
- params.append(u"type")
+ params = element.get("parameters", None)
+ if params and "type" not in params:
+ params.append("type")
+
+ cores = element.get("core", None)
+ if cores:
+ tests = list()
+ for core in cores:
+ for test in include:
+ tests.append(test.format(core=core))
+ else:
+ tests = include
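+        # Illustrative: with include ["ip4base-{core}-ndrpdr"] (a made-up
+        # pattern) and core ["1c", "2c"], the expanded list is
+        # ["ip4base-1c-ndrpdr", "ip4base-2c-ndrpdr"].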
- data = pd.Series()
+ data = pd.Series(dtype="float64")
try:
- for job, builds in element[u"data"].items():
- data[job] = pd.Series()
+ for job, builds in element["data"].items():
+ data[job] = pd.Series(dtype="float64")
for build in builds:
- data[job][str(build)] = pd.Series()
- for test in include:
+ data[job][str(build)] = pd.Series(dtype="float64")
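+                    # Each entry in "tests" is treated as a regular
+                    # expression and matched against lower-cased test IDs.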
+ for test in tests:
try:
reg_ex = re.compile(str(test).lower())
for test_id in self.data[job][
if re.match(reg_ex, str(test_id).lower()):
test_data = self.data[job][
str(build)][data_set][test_id]
- data[job][str(build)][test_id] = pd.Series()
+ data[job][str(build)][test_id] = \
+ pd.Series(dtype="float64")
if params is None:
for param, val in test_data.items():
data[job][str(build)][test_id]\
test_data[param]
except KeyError:
data[job][str(build)][
- test_id][param] = u"No Data"
+ test_id][param] = "No Data"
except KeyError as err:
- logging.error(repr(err))
if continue_on_error:
+ logging.debug(repr(err))
continue
+ logging.error(repr(err))
return None
return data
:rtype: pandas.Series
"""
- logging.info(u" Merging data ...")
+ logging.info(" Merging data ...")
- merged_data = pd.Series()
+ merged_data = pd.Series(dtype="float64")
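+        # Note: if the same item ID occurs in more than one build, the last
+        # processed occurrence overwrites the previous ones.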
for builds in data.values:
for item in builds.values:
for item_id, item_data in item.items():
merged_data[item_id] = item_data
-
return merged_data
def print_all_oper_data(self):
"""Print all operational data to console.
"""
- tbl_hdr = (
- u"Name",
- u"Nr of Vectors",
- u"Nr of Packets",
- u"Suspends",
- u"Cycles per Packet",
- u"Average Vector Size"
- )
-
for job in self._input_data.values:
for build in job.values:
- for test_id, test_data in build[u"tests"].items():
+ for test_id, test_data in build["tests"].items():
print(f"{test_id}")
- if test_data.get(u"show-run", None) is None:
+ if test_data.get("show-run", None) is None:
continue
- for dut_name, data in test_data[u"show-run"].items():
- if data.get(u"threads", None) is None:
+ for dut_name, data in test_data["show-run"].items():
+ if data.get("runtime", None) is None:
+ continue
+ runtime = loads(data["runtime"])
+ try:
+ threads_nr = len(runtime[0]["clocks"])
+ except (IndexError, KeyError):
continue
- print(f"Host IP: {data.get(u'host', '')}, "
- f"Socket: {data.get(u'socket', '')}")
- for thread_nr, thread in data[u"threads"].items():
- txt_table = prettytable.PrettyTable(tbl_hdr)
+ threads = OrderedDict(
+ {idx: list() for idx in range(threads_nr)})
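+                    # For each runtime graph node, compute per-thread
+                    # statistics: clocks are normalised per vector if any
+                    # vectors were processed, else per call, else per
+                    # suspend; "Average Vector Size" is vectors per call.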
+ for item in runtime:
+ for idx in range(threads_nr):
+ if item["vectors"][idx] > 0:
+ clocks = item["clocks"][idx] / \
+ item["vectors"][idx]
+ elif item["calls"][idx] > 0:
+ clocks = item["clocks"][idx] / \
+ item["calls"][idx]
+ elif item["suspends"][idx] > 0:
+ clocks = item["clocks"][idx] / \
+ item["suspends"][idx]
+ else:
+ clocks = 0.0
+
+ if item["calls"][idx] > 0:
+ vectors_call = item["vectors"][idx] / \
+ item["calls"][idx]
+ else:
+ vectors_call = 0.0
+
+ if int(item["calls"][idx]) + int(
+ item["vectors"][idx]) + \
+ int(item["suspends"][idx]):
+ threads[idx].append([
+ item["name"],
+ item["calls"][idx],
+ item["vectors"][idx],
+ item["suspends"][idx],
+ clocks,
+ vectors_call
+ ])
+
+ print(f"Host IP: {data.get('host', '')}, "
+ f"Socket: {data.get('socket', '')}")
+ for thread_nr, thread in threads.items():
+                        txt_table = prettytable.PrettyTable(
+                            (
+                                "Name",
+                                "Nr of Calls",
+                                "Nr of Packets",
+                                "Suspends",
+                                "Cycles per Packet",
+                                "Average Vector Size"
+                            )
+                        )
avg = 0.0
for row in thread:
txt_table.add_row(row)
avg += row[-1]
if len(thread) == 0:
- avg = u""
+ avg = ""
else:
avg = f", Average Vector Size per Node: " \
f"{(avg / len(thread)):.2f}"
- th_name = u"main" if thread_nr == 0 \
+ th_name = "main" if thread_nr == 0 \
else f"worker_{thread_nr}"
print(f"{dut_name}, {th_name}{avg}")
- txt_table.float_format = u".2"
- txt_table.align = u"r"
- txt_table.align[u"Name"] = u"l"
+ txt_table.float_format = ".2"
+ txt_table.align = "r"
+ txt_table.align["Name"] = "l"
print(f"{txt_table.get_string()}\n")