Refactor getting telemetry 38/21638/21
author     Peter Mikus <pmikus@cisco.com>
           Fri, 30 Aug 2019 13:20:14 +0000 (13:20 +0000)
committer  Peter Mikus <pmikus@cisco.com>
           Tue, 10 Sep 2019 07:28:44 +0000 (07:28 +0000)
+ Ability to get stats from CNF via SocketPAPI
- Remove obsolete functions

Signed-off-by: Peter Mikus <pmikus@cisco.com>
Change-Id: I4d1b32a7279244592be96644e4f8a530c4f29a15
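
A minimal end-to-end sketch of what this change enables, assuming a standard CSIT topology node dict for node; the method and class names come from the APIs added below, while the CLI command and stats path simply mirror usage elsewhere in this change:

    from resources.libraries.python.PapiExecutor import (
        PapiExecutor, PapiSocketExecutor)
    from resources.libraries.python.topology import SocketType, Topology

    def log_cnf_telemetry(node):
        """Show errors on every registered PAPI socket, then read node stats
        from every registered stats socket of this DUT and its CNFs."""
        PapiSocketExecutor.run_cli_cmd_on_all_sockets(node, 'show errors')
        stats_sockets = Topology.get_node_sockets(
            node, socket_type=SocketType.STATS) or {}
        for socket in stats_sockets.values():
            with PapiExecutor(node) as papi_exec:
                stats = papi_exec.add(
                    'vpp-stats', path='^/sys/node').get_stats(socket=socket)[0]
            print(stats['/sys/node/names'])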

resources/libraries/python/Constants.py
resources/libraries/python/ContainerUtils.py
resources/libraries/python/PapiExecutor.py
resources/libraries/python/VppCounters.py
resources/libraries/python/topology.py
resources/libraries/robot/performance/performance_utils.robot
resources/libraries/robot/shared/container.robot
resources/libraries/robot/shared/counters.robot [deleted file]
resources/libraries/robot/shared/default.robot
resources/libraries/robot/shared/test_teardown.robot
resources/libraries/robot/shared/traffic.robot

index 9606a10..b7f9193 100644
@@ -228,6 +228,9 @@ class Constants(object):
     #  /tmp directory is inside the DUT1 docker.
     DUT1_UUID = get_str_from_env("DUT1_UUID", "")
 
+    # Default path to VPP API Stats socket.
+    SOCKSTAT_PATH = "/run/vpp/stats.sock"
+
     # Global "kill switch" for CRC checking during runtime.
     FAIL_ON_CRC_MISMATCH = get_optimistic_bool_from_env("FAIL_ON_CRC_MISMATCH")
 
index 363411c..cd48fc6 100644
@@ -21,7 +21,7 @@ from collections import OrderedDict, Counter
 
 from resources.libraries.python.ssh import SSH
 from resources.libraries.python.Constants import Constants
-from resources.libraries.python.topology import Topology
+from resources.libraries.python.topology import Topology, SocketType
 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
 
 
@@ -430,6 +430,22 @@ class ContainerEngine(object):
         self.execute('supervisorctl reload')
         self.execute('supervisorctl start vpp')
 
+        from robot.libraries.BuiltIn import BuiltIn
+        topo_instance = BuiltIn().get_library_instance(
+            'resources.libraries.python.topology.Topology')
+        topo_instance.add_new_socket(
+            self.container.node,
+            SocketType.PAPI,
+            self.container.name,
+            '{root}/tmp/vpp_sockets/{name}/api.sock'.
+            format(root=self.container.root, name=self.container.name))
+        topo_instance.add_new_socket(
+            self.container.node,
+            SocketType.STATS,
+            self.container.name,
+            '{root}/tmp/vpp_sockets/{name}/stats.sock'.
+            format(root=self.container.root, name=self.container.name))
+
     def restart_vpp(self):
         """Restart VPP service inside a container."""
         self.execute('supervisorctl restart vpp')
@@ -449,7 +465,8 @@ class ContainerEngine(object):
         vpp_config.add_unix_cli_listen()
         vpp_config.add_unix_nodaemon()
         vpp_config.add_unix_exec('/tmp/running.exec')
-        vpp_config.add_socksvr()
+        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
+        vpp_config.add_statseg_per_node_counters(value='on')
         # We will pop the first core from the list to be a main core
         vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
         # If more cores in the list, the rest will be used as workers.
@@ -499,7 +516,8 @@ class ContainerEngine(object):
         vpp_config.add_unix_cli_listen()
         vpp_config.add_unix_nodaemon()
         vpp_config.add_unix_exec('/tmp/running.exec')
-        vpp_config.add_socksvr()
+        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
+        vpp_config.add_statseg_per_node_counters(value='on')
         vpp_config.add_plugin('disable', 'dpdk_plugin.so')
 
         # Apply configuration
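
To connect the pieces, a hedged sketch of how the host-side socket paths registered here line up with the sockets VPP opens inside the container (assuming SOCKSVR_PATH is the stock /run/vpp/api.sock; the /run/vpp bind mount is added in container.robot below):

    # Host-side paths registered in the topology for container 'DUT1_CNF1';
    # root stays empty unless DUT1 itself runs inside a Docker mergeddir.
    root = ''
    name = 'DUT1_CNF1'
    host_api_sock = '{root}/tmp/vpp_sockets/{name}/api.sock'.format(
        root=root, name=name)
    host_stats_sock = '{root}/tmp/vpp_sockets/{name}/stats.sock'.format(
        root=root, name=name)
    # Inside the container, VPP listens on /run/vpp/api.sock (socksvr) and
    # /run/vpp/stats.sock (statseg); the ${root}/tmp/vpp_sockets/${name}/:/run/vpp/
    # mount makes these the very files referenced by the host paths above.
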
index 74ff7a0..aec43b6 100644
@@ -34,6 +34,7 @@ from resources.libraries.python.PythonThree import raise_from
 from resources.libraries.python.PapiHistory import PapiHistory
 from resources.libraries.python.ssh import (
     SSH, SSHTimeout, exec_cmd_no_error, scp_node)
+from resources.libraries.python.topology import Topology, SocketType
 from resources.libraries.python.VppApiCrc import VppApiCrcChecker
 
 
@@ -397,7 +398,7 @@ class PapiSocketExecutor(object):
         :raises AssertionError: If retval is nonzero, parsing or ssh error.
         """
         reply = self.get_reply(err_msg=err_msg)
-        logger.info("Getting index from {reply!r}".format(reply=reply))
+        logger.trace("Getting index from {reply!r}".format(reply=reply))
         return reply["sw_if_index"]
 
     def get_details(self, err_msg="Failed to get dump details."):
@@ -418,15 +419,18 @@ class PapiSocketExecutor(object):
         return self._execute(err_msg)
 
     @staticmethod
-    def run_cli_cmd(node, cmd, log=True):
+    def run_cli_cmd(node, cmd, log=True,
+                    remote_vpp_socket=Constants.SOCKSVR_PATH):
         """Run a CLI command as cli_inband, return the "reply" field of reply.
 
         Optionally, log the field value.
 
         :param node: Node to run command on.
         :param cmd: The CLI command to be run on the node.
+        :param remote_vpp_socket: Path to remote socket to tunnel to.
         :param log: If True, the response is logged.
         :type node: dict
+        :type remote_vpp_socket: str
         :type cmd: str
         :type log: bool
         :returns: CLI output.
@@ -436,12 +440,32 @@ class PapiSocketExecutor(object):
         args = dict(cmd=cmd)
         err_msg = "Failed to run 'cli_inband {cmd}' PAPI command on host " \
                   "{host}".format(host=node['host'], cmd=cmd)
-        with PapiSocketExecutor(node) as papi_exec:
+        with PapiSocketExecutor(node, remote_vpp_socket) as papi_exec:
             reply = papi_exec.add(cli, **args).get_reply(err_msg)["reply"]
         if log:
-            logger.info("{cmd}:\n{reply}".format(cmd=cmd, reply=reply))
+            logger.info(
+                "{cmd} ({host} - {remote_vpp_socket}):\n{reply}".
+                format(cmd=cmd, reply=reply,
+                       remote_vpp_socket=remote_vpp_socket, host=node['host']))
         return reply
 
+    @staticmethod
+    def run_cli_cmd_on_all_sockets(node, cmd, log=True):
+        """Run a CLI command as cli_inband, on all sockets in topology file.
+
+        :param node: Node to run command on.
+        :param cmd: The CLI command to be run on the node.
+        :param log: If True, the response is logged.
+        :type node: dict
+        :type cmd: str
+        :type log: bool
+        """
+        sockets = Topology.get_node_sockets(node, socket_type=SocketType.PAPI)
+        if sockets:
+            for socket in sockets.values():
+                PapiSocketExecutor.run_cli_cmd(
+                    node, cmd, log=log, remote_vpp_socket=socket)
+
     @staticmethod
     def dump_and_log(node, cmds):
         """Dump and log requested information, return None.
@@ -607,7 +631,8 @@ class PapiExecutor(object):
             api_name=csit_papi_command, api_args=copy.deepcopy(kwargs)))
         return self
 
-    def get_stats(self, err_msg="Failed to get statistics.", timeout=120):
+    def get_stats(self, err_msg="Failed to get statistics.", timeout=120,
+                  socket=Constants.SOCKSTAT_PATH):
         """Get VPP Stats from VPP Python API.
 
         :param err_msg: The message used if the PAPI command(s) execution fails.
@@ -617,12 +642,12 @@ class PapiExecutor(object):
         :returns: Requested VPP statistics.
         :rtype: list of dict
         """
-
         paths = [cmd['api_args']['path'] for cmd in self._api_command_list]
         self._api_command_list = list()
 
         stdout = self._execute_papi(
-            paths, method='stats', err_msg=err_msg, timeout=timeout)
+            paths, method='stats', err_msg=err_msg, timeout=timeout,
+            socket=socket)
 
         return json.loads(stdout)
 
@@ -667,7 +692,7 @@ class PapiExecutor(object):
         return api_data_processed
 
     def _execute_papi(self, api_data, method='request', err_msg="",
-                      timeout=120):
+                      timeout=120, socket=None):
         """Execute PAPI command(s) on remote node and store the result.
 
         :param api_data: List of APIs with their arguments.
@@ -685,7 +710,6 @@ class PapiExecutor(object):
         :raises RuntimeError: If PAPI executor failed due to another reason.
         :raises AssertionError: If PAPI command(s) execution has failed.
         """
-
         if not api_data:
             raise RuntimeError("No API data provided.")
 
@@ -693,10 +717,12 @@ class PapiExecutor(object):
             if method in ("stats", "stats_request") \
             else json.dumps(self._process_api_data(api_data))
 
-        cmd = "{fw_dir}/{papi_provider} --method {method} --data '{json}'".\
-            format(
-                fw_dir=Constants.REMOTE_FW_DIR, method=method, json=json_data,
-                papi_provider=Constants.RESOURCES_PAPI_PROVIDER)
+        sock = " --socket {socket}".format(socket=socket) if socket else ""
+        cmd = (
+            "{fw_dir}/{papi_provider} --method {method} --data '{json}'{socket}"
+            .format(fw_dir=Constants.REMOTE_FW_DIR,
+                    papi_provider=Constants.RESOURCES_PAPI_PROVIDER,
+                    method=method, json=json_data, socket=sock))
         try:
             ret_code, stdout, _ = self._ssh.exec_command_sudo(
                 cmd=cmd, timeout=timeout, log_stdout_err=False)
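
A hedged usage sketch of the extended socket executor; node stands for a CSIT topology node dict, and the explicit socket path is illustrative, following the per-container layout registered by ContainerUtils:

    from resources.libraries.python.PapiExecutor import PapiSocketExecutor

    def show_version_everywhere(node):
        # Default VPP instance of the DUT (SOCKSVR_PATH).
        PapiSocketExecutor.run_cli_cmd(node, 'show version')
        # One specific CNF socket (path illustrative).
        PapiSocketExecutor.run_cli_cmd(
            node, 'show version',
            remote_vpp_socket='/tmp/vpp_sockets/DUT1_CNF1/api.sock')
        # Every PAPI socket registered for this node in the topology.
        PapiSocketExecutor.run_cli_cmd_on_all_sockets(node, 'show version')
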
index f847ca6..65d6477 100644
 
 """VPP counters utilities library."""
 
-import time
-
 from pprint import pformat
 
 from robot.api import logger
 from resources.libraries.python.PapiExecutor import PapiExecutor
 from resources.libraries.python.PapiExecutor import PapiSocketExecutor
-from resources.libraries.python.topology import NodeType, Topology
+from resources.libraries.python.topology import Topology, SocketType, NodeType
 
 
 class VppCounters(object):
@@ -29,17 +27,6 @@ class VppCounters(object):
     def __init__(self):
         self._stats_table = None
 
-    @staticmethod
-    def _get_non_zero_items(data):
-        """Extract and return non-zero items from the input data.
-
-        :param data: Data to filter.
-        :type data: dict
-        :returns: Dictionary with non-zero items.
-        :rtype dict
-        """
-        return {k: data[k] for k in data.keys() if sum(data[k])}
-
     @staticmethod
     def vpp_show_errors(node):
         """Run "show errors" debug CLI command.
@@ -47,32 +34,19 @@ class VppCounters(object):
         :param node: Node to run command on.
         :type node: dict
         """
-        PapiSocketExecutor.run_cli_cmd(node, 'show errors')
-
-    @staticmethod
-    def vpp_show_errors_verbose(node):
-        """Run "show errors verbose" debug CLI command.
-
-        :param node: Node to run command on.
-        :type node: dict
-        """
-        PapiSocketExecutor.run_cli_cmd(node, 'show errors verbose')
+        PapiSocketExecutor.run_cli_cmd_on_all_sockets(
+            node, 'show errors')
 
     @staticmethod
-    def vpp_show_errors_on_all_duts(nodes, verbose=False):
+    def vpp_show_errors_on_all_duts(nodes):
         """Show errors on all DUTs.
 
         :param nodes: VPP nodes.
-        :param verbose: If True show verbose output.
         :type nodes: dict
-        :type verbose: bool
         """
         for node in nodes.values():
             if node['type'] == NodeType.DUT:
-                if verbose:
-                    VppCounters.vpp_show_errors_verbose(node)
-                else:
-                    VppCounters.vpp_show_errors(node)
+                VppCounters.vpp_show_errors(node)
 
     @staticmethod
     def vpp_show_runtime(node, log_zeros=False):
@@ -84,68 +58,63 @@ class VppCounters(object):
         :type log_zeros: bool
         """
         args = dict(path='^/sys/node')
-        with PapiExecutor(node) as papi_exec:
-            stats = papi_exec.add("vpp-stats", **args).get_stats()[0]
-            # TODO: Introduce get_stat?
-
-        names = stats['/sys/node/names']
-
-        if not names:
-            return
-
-        runtime = []
-        runtime_non_zero = []
-
-        for name in names:
-            runtime.append({'name': name})
-
-        for idx, runtime_item in enumerate(runtime):
-
-            calls_th = []
-            for thread in stats['/sys/node/calls']:
-                calls_th.append(thread[idx])
-            runtime_item["calls"] = calls_th
-
-            vectors_th = []
-            for thread in stats['/sys/node/vectors']:
-                vectors_th.append(thread[idx])
-            runtime_item["vectors"] = vectors_th
-
-            suspends_th = []
-            for thread in stats['/sys/node/suspends']:
-                suspends_th.append(thread[idx])
-            runtime_item["suspends"] = suspends_th
-
-            clocks_th = []
-            for thread in stats['/sys/node/clocks']:
-                clocks_th.append(thread[idx])
-            runtime_item["clocks"] = clocks_th
-
-            if (sum(calls_th) or sum(vectors_th) or
-                    sum(suspends_th) or sum(clocks_th)):
-                runtime_non_zero.append(runtime_item)
-
-        if log_zeros:
-            logger.info("Runtime:\n{runtime}".format(
-                runtime=pformat(runtime)))
-        else:
-            logger.info("Runtime:\n{runtime}".format(
-                runtime=pformat(runtime_non_zero)))
-
-    @staticmethod
-    def vpp_show_runtime_verbose(node):
-        """Run "show runtime verbose" CLI command.
-
-        TODO: Remove?
-              Only verbose output is possible to get using VPPStats.
-
-        :param node: Node to run command on.
-        :type node: dict
-        """
-        VppCounters.vpp_show_runtime(node)
+        sockets = Topology.get_node_sockets(node, socket_type=SocketType.STATS)
+        if sockets:
+            for socket in sockets.values():
+                with PapiExecutor(node) as papi_exec:
+                    stats = papi_exec.add("vpp-stats", **args).\
+                        get_stats(socket=socket)[0]
+
+                names = stats['/sys/node/names']
+
+                if not names:
+                    return
+
+                runtime = []
+                runtime_non_zero = []
+
+                for name in names:
+                    runtime.append({'name': name})
+
+                for idx, runtime_item in enumerate(runtime):
+
+                    calls_th = []
+                    for thread in stats['/sys/node/calls']:
+                        calls_th.append(thread[idx])
+                    runtime_item["calls"] = calls_th
+
+                    vectors_th = []
+                    for thread in stats['/sys/node/vectors']:
+                        vectors_th.append(thread[idx])
+                    runtime_item["vectors"] = vectors_th
+
+                    suspends_th = []
+                    for thread in stats['/sys/node/suspends']:
+                        suspends_th.append(thread[idx])
+                    runtime_item["suspends"] = suspends_th
+
+                    clocks_th = []
+                    for thread in stats['/sys/node/clocks']:
+                        clocks_th.append(thread[idx])
+                    runtime_item["clocks"] = clocks_th
+
+                    if (sum(calls_th) or sum(vectors_th) or
+                            sum(suspends_th) or sum(clocks_th)):
+                        runtime_non_zero.append(runtime_item)
+
+                if log_zeros:
+                    logger.info(
+                        "stats runtime ({host} - {socket}):\n{runtime}".format(
+                        host=node['host'], runtime=pformat(runtime),
+                        socket=socket))
+                else:
+                    logger.info(
+                        "stats runtime ({host} - {socket}):\n{runtime}".format(
+                        host=node['host'], runtime=pformat(runtime_non_zero),
+                        socket=socket))
 
     @staticmethod
-    def show_runtime_counters_on_all_duts(nodes):
+    def vpp_show_runtime_counters_on_all_duts(nodes):
         """Clear VPP runtime counters on all DUTs.
 
         :param nodes: VPP nodes.
@@ -162,7 +131,8 @@ class VppCounters(object):
         :param node: Node to run command on.
         :type node: dict
         """
-        PapiSocketExecutor.run_cli_cmd(node, 'show hardware verbose')
+        PapiSocketExecutor.run_cli_cmd_on_all_sockets(
+            node, 'show hardware verbose')
 
     @staticmethod
     def vpp_show_memory(node):
@@ -182,13 +152,12 @@ class VppCounters(object):
 
         :param node: Node to run command on.
         :type node: dict
-        :returns: Verified data from PAPI response.
-        :rtype: dict
         """
-        return PapiSocketExecutor.run_cli_cmd(node, 'clear runtime', log=False)
+        PapiSocketExecutor.run_cli_cmd_on_all_sockets(
+            node, 'clear runtime', log=False)
 
     @staticmethod
-    def clear_runtime_counters_on_all_duts(nodes):
+    def vpp_clear_runtime_counters_on_all_duts(nodes):
         """Run "clear runtime" CLI command on all DUTs.
 
         :param nodes: VPP nodes.
@@ -198,29 +167,6 @@ class VppCounters(object):
             if node['type'] == NodeType.DUT:
                 VppCounters.vpp_clear_runtime(node)
 
-    @staticmethod
-    def vpp_clear_interface_counters(node):
-        """Run "clear interfaces" CLI command.
-
-        :param node: Node to run command on.
-        :type node: dict
-        :returns: Verified data from PAPI response.
-        :rtype: dict
-        """
-        return PapiSocketExecutor.run_cli_cmd(
-            node, 'clear interfaces', log=False)
-
-    @staticmethod
-    def clear_interface_counters_on_all_duts(nodes):
-        """Clear interface counters on all DUTs.
-
-        :param nodes: VPP nodes.
-        :type nodes: dict
-        """
-        for node in nodes.values():
-            if node['type'] == NodeType.DUT:
-                VppCounters.vpp_clear_interface_counters(node)
-
     @staticmethod
     def vpp_clear_hardware_counters(node):
         """Run "clear hardware" CLI command.
@@ -230,10 +176,11 @@ class VppCounters(object):
         :returns: Verified data from PAPI response.
         :rtype: dict
         """
-        return PapiSocketExecutor.run_cli_cmd(node, 'clear hardware', log=False)
+        PapiSocketExecutor.run_cli_cmd_on_all_sockets(
+            node, 'clear hardware', log=False)
 
     @staticmethod
-    def clear_hardware_counters_on_all_duts(nodes):
+    def vpp_clear_hardware_counters_on_all_duts(nodes):
         """Clear hardware counters on all DUTs.
 
         :param nodes: VPP nodes.
@@ -249,13 +196,12 @@ class VppCounters(object):
 
         :param node: Node to run command on.
         :type node: dict
-        :returns: Verified data from PAPI response.
-        :rtype: dict
         """
-        return PapiSocketExecutor.run_cli_cmd(node, 'clear errors', log=False)
+        PapiSocketExecutor.run_cli_cmd_on_all_sockets(
+            node, 'clear errors', log=False)
 
     @staticmethod
-    def clear_error_counters_on_all_duts(nodes):
+    def vpp_clear_error_counters_on_all_duts(nodes):
         """Clear VPP errors counters on all DUTs.
 
         :param nodes: VPP nodes.
@@ -265,61 +211,6 @@ class VppCounters(object):
             if node['type'] == NodeType.DUT:
                 VppCounters.vpp_clear_errors_counters(node)
 
-    def vpp_get_ipv4_interface_counter(self, node, interface):
-        """
-
-        :param node: Node to get interface IPv4 counter on.
-        :param interface: Interface name.
-        :type node: dict
-        :type interface: str
-        :returns: Interface IPv4 counter.
-        :rtype: int
-        """
-        return self.vpp_get_ipv46_interface_counter(node, interface, False)
-
-    def vpp_get_ipv6_interface_counter(self, node, interface):
-        """
-
-        :param node: Node to get interface IPv6 counter on.
-        :param interface: Interface name.
-        :type node: dict
-        :type interface: str
-        :returns: Interface IPv6 counter.
-        :rtype: int
-        """
-        return self.vpp_get_ipv46_interface_counter(node, interface, True)
-
-    def vpp_get_ipv46_interface_counter(self, node, interface, is_ipv6=True):
-        """Return interface IPv4/IPv6 counter.
-
-        :param node: Node to get interface IPv4/IPv6 counter on.
-        :param interface: Interface name.
-        :param is_ipv6: Specify IP version.
-        :type node: dict
-        :type interface: str
-        :type is_ipv6: bool
-        :returns: Interface IPv4/IPv6 counter.
-        :rtype: int
-        """
-        version = 'ip6' if is_ipv6 else 'ip4'
-        topo = Topology()
-        sw_if_index = topo.get_interface_sw_index(node, interface)
-        if sw_if_index is None:
-            logger.trace('{i} sw_if_index not found.'.format(i=interface))
-            return 0
-
-        if_counters = self._stats_table.get('interface_counters')
-        if not if_counters:
-            logger.trace('No interface counters.')
-            return 0
-        for counter in if_counters:
-            if counter['vnet_counter_type'] == version:
-                data = counter['data']
-                return data[sw_if_index]
-        logger.trace('{i} {v} counter not found.'.format(
-            i=interface, v=version))
-        return 0
-
     @staticmethod
     def show_vpp_statistics(node):
         """Show [error, hardware, interface] stats.
@@ -333,16 +224,34 @@ class VppCounters(object):
         VppCounters.vpp_show_memory(node)
 
     @staticmethod
-    def show_statistics_on_all_duts(nodes, sleeptime=5):
-        """Show VPP statistics on all DUTs.
+    def show_statistics_on_all_duts(nodes):
+        """Show statistics on all DUTs.
 
-        :param nodes: VPP nodes.
+        :param nodes: DUT nodes.
         :type nodes: dict
-        :param sleeptime: Time to wait for traffic to arrive back to TG.
-        :type sleeptime: int
         """
-        logger.trace('Waiting for statistics to be collected')
-        time.sleep(sleeptime)
         for node in nodes.values():
             if node['type'] == NodeType.DUT:
                 VppCounters.show_vpp_statistics(node)
+
+    @staticmethod
+    def clear_vpp_statistics(node):
+        """Clear [error, hardware, interface] stats.
+
+        :param node: VPP node.
+        :type node: dict
+        """
+        VppCounters.vpp_clear_errors_counters(node)
+        VppCounters.vpp_clear_hardware_counters(node)
+        VppCounters.vpp_clear_runtime(node)
+
+    @staticmethod
+    def clear_statistics_on_all_duts(nodes):
+        """Clear statistics on all DUTs.
+
+        :param nodes: DUT nodes.
+        :type nodes: dict
+        """
+        for node in nodes.values():
+            if node['type'] == NodeType.DUT:
+                VppCounters.clear_vpp_statistics(node)
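
For orientation, a hedged sketch of the intended call pattern around a traffic trial, using only the methods defined above (nodes is the full topology dict; the *_on_all_sockets helpers also reach every registered CNF socket):

    from resources.libraries.python.VppCounters import VppCounters

    def runtime_telemetry_around_trial(nodes):
        # Reset error/hardware/runtime counters on every DUT and CNF.
        VppCounters.clear_statistics_on_all_duts(nodes)
        VppCounters.vpp_clear_runtime_counters_on_all_duts(nodes)
        # ... send traffic here ...
        VppCounters.vpp_show_runtime_counters_on_all_duts(nodes)
        VppCounters.show_statistics_on_all_duts(nodes)
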
index 1e5ce4b..91578a5 100644
@@ -23,7 +23,7 @@ from robot.api import logger
 from robot.libraries.BuiltIn import BuiltIn, RobotNotRunningError
 from robot.api.deco import keyword
 
-__all__ = ["DICT__nodes", 'Topology', 'NodeType']
+__all__ = ["DICT__nodes", 'Topology', 'NodeType', 'SocketType']
 
 
 def load_topo_from_yaml():
@@ -61,6 +61,12 @@ class NodeSubTypeTG(object):
     # IxNetwork
     IXNET = 'IXNET'
 
+class SocketType(object):
+    """Defines socket types used in topology dictionaries."""
+    # VPP Socket PAPI
+    PAPI = 'PAPI'
+    # VPP PAPI Stats (legacy option until stats are migrated to Socket PAPI)
+    STATS = 'STATS'
 
 DICT__nodes = load_topo_from_yaml()
 
@@ -83,6 +89,26 @@ class Topology(object):
     the methods without having filled active topology with internal nodes data.
     """
 
+    def add_node_item(self, node, value, path):
+        """Add item to topology node.
+
+        :param node: Topology node.
+        :param value: Value to insert.
+        :param path: Path where to insert item.
+        :type node: dict
+        :type value: str
+        :type path: list
+        """
+        if len(path) == 1:
+            node[path[0]] = value
+            return
+        if path[0] not in node:
+            node[path[0]] = {}
+        elif isinstance(node[path[0]], str):
+            node[path[0]] = {} if node[path[0]] == '' \
+                else {node[path[0]]: ''}
+        self.add_node_item(node[path[0]], value, path[1:])
+
     @staticmethod
     def add_new_port(node, ptype):
         """Add new port to the node to active topology.
@@ -1033,3 +1059,47 @@ class Topology(object):
             return iface_key
         except KeyError:
             return None
+
+    def add_new_socket(self, node, socket_type, socket_id, socket_path):
+        """Add socket file of specific SocketType and ID to node.
+
+        :param node: Node to add socket on.
+        :param socket_type: Socket type.
+        :param socket_id: Socket id.
+        :param socket_path: Socket absolute path.
+        :type node: dict
+        :type socket_type: SocketType
+        :type socket_id: str
+        :type socket_path: str
+        """
+        path = ['sockets', socket_type, socket_id]
+        self.add_node_item(node, socket_path, path)
+
+    @staticmethod
+    def get_node_sockets(node, socket_type=None):
+        """Get node socket files.
+
+        :param node: Node to get sockets from.
+        :param socket_type: Socket type or None for all sockets.
+        :type node: dict
+        :type socket_type: SocketType
+        :returns: Node sockets or None if not found.
+        :rtype: list
+        """
+        try:
+            if socket_type:
+                return node['sockets'][socket_type]
+            return node['sockets']
+        except KeyError:
+            return None
+
+    @staticmethod
+    def clean_sockets_on_all_nodes(nodes):
+        """Remove temporary socket files from topology file.
+
+        :param nodes: SUT nodes.
+        :type nodes: dict
+        """
+        for node in nodes.values():
+            if 'sockets' in node.keys():
+                node.pop('sockets')
index bffd35d..8ac7155 100644
 | | Clear and show runtime counters with running traffic | ${trial_duration}
 | | ... | ${rate} | ${frame_size} | ${traffic_profile}
 | | ... | ${unidirection} | ${tx_port} | ${rx_port}
-| | Run Keyword If | ${dut_stats}==${True} | Clear all counters on all DUTs
+| | Run Keyword If | ${dut_stats}==${True}
+| | ... | Clear statistics on all DUTs | ${nodes}
 | | Run Keyword If | ${dut_stats}==${True} and ${pkt_trace}==${True}
 | | ... | VPP Enable Traces On All DUTs | ${nodes} | fail_on_error=${False}
 | | Run Keyword If | ${dut_stats}==${True}
 | | ... | warmup_time=${0} | async_call=${True} | latency=${False}
 | | ... | unidirection=${unidirection} | tx_port=${tx_port} | rx_port=${rx_port}
 | | Run Keyword If | ${dut_stats}==${True}
-| | ... | Clear runtime counters on all DUTs | ${nodes}
+| | ... | VPP clear runtime counters on all DUTs | ${nodes}
 | | Sleep | ${duration}
 | | Run Keyword If | ${dut_stats}==${True}
-| | ... | Show runtime counters on all DUTs | ${nodes}
+| | ... | VPP show runtime counters on all DUTs | ${nodes}
 | | Stop traffic on tg
 
 | Start Traffic on Background
index cb3bc29..f440768 100644
 | | [Arguments] | ${nf_chains}=${1} | ${nf_nodes}=${1} | ${nf_chain}=${1}
 | | ... | ${nf_node}=${1} | ${auto_scale}=${True} | ${pinning}=${True}
 | | ...
+| | ${duts}= | Get Matches | ${nodes} | DUT*
+| | :FOR | ${dut} | IN | @{duts}
+| | | Run Keyword | Construct container on DUT | ${dut}
+| | | ... | ${nf_chains} | ${nf_nodes} | ${nf_chain}
+| | | ... | ${nf_node} | ${auto_scale} | ${pinning}
+
+| Construct container on DUT
+| | [Documentation] | Construct 1 CNF of specific technology on specific DUT.
+| | ...
+| | ... | *Arguments:*
+| | ... | - dut: DUT node to construct the CNF on. Type: string
+| | ... | - nf_chains: Total number of chains (Optional). Type: integer, default
+| | ... | value: ${1}
+| | ... | - nf_nodes: Total number of nodes per chain (Optional). Type: integer,
+| | ... | default value: ${1}
+| | ... | - nf_chain: Chain ID (Optional). Type: integer, default value: ${1}
+| | ... | - nf_node: Node ID (Optional). Type: integer, default value: ${1}
+| | ... | - auto_scale - If True, use same amount of Dataplane threads for
+| | ... |   network function as DUT, otherwise use single physical core for
+| | ... |   every network function. Type: boolean
+| | ... | - pinning: Set True if CPU pinning should be done on starting
+| | ... |   containers. Type: boolean, default value: ${True}
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Construct container on DUT \| DUT1 \| 1 \| 1 \| 1 \| 1 \|
+| | ... | \| ${True} \|
+| | ...
+| | [Arguments] | ${dut}
+| | ... | ${nf_chains}=${1} | ${nf_nodes}=${1} | ${nf_chain}=${1}
+| | ... | ${nf_node}=${1} | ${auto_scale}=${True} | ${pinning}=${True}
+| | ...
 | | ${nf_dtcr_status} | ${value}= | Run Keyword And Ignore Error
 | | ... | Variable Should Exist | ${nf_dtcr}
 | | ${nf_dtcr}= | Run Keyword If | '${nf_dtcr_status}' == 'PASS'
 | | ${nf_dtc}= | Run Keyword If | ${pinning}
 | | ... | Set Variable If | ${auto_scale} | ${cpu_count_int}
 | | ... | ${nf_dtc}
-| | ${duts}= | Get Matches | ${nodes} | DUT*
-| | :FOR | ${dut} | IN | @{duts}
-| | | ${nf_id}= | Evaluate | (${nf_chain} - ${1}) * ${nf_nodes} + ${nf_node}
-| | | ${env}= | Create List | DEBIAN_FRONTEND=noninteractive
-| | | ${dut1_uuid_length} = | Get Length | ${DUT1_UUID}
-| | | ${root}= | Run Keyword If | ${dut1_uuid_length}
-| | | ... | Get Docker Mergeddir | ${nodes['DUT1']} | ${DUT1_UUID}
-| | | ... | ELSE | Set Variable | ${EMPTY}
-| | | ${node_arch}= | Get Node Arch | ${nodes['${dut}']}
-| | | ${mnt}= | Create List
-| | | ... | ${root}/tmp/:/mnt/host/
-| | ... | ${root}/dev/vfio/:/dev/vfio/
-| | ... | ${root}/usr/bin/vpp:/usr/bin/vpp
-| | ... | ${root}/usr/bin/vppctl:/usr/bin/vppctl
-| | ... | ${root}/usr/lib/${node_arch}-linux-gnu/:/usr/lib/${node_arch}-linux-gnu/
-| | ... | ${root}/usr/share/vpp/:/usr/share/vpp/
-| | ${nf_cpus}= | Set Variable | ${None}
-| | ${nf_cpus}= | Run Keyword If | ${pinning}
-| | ... | Get Affinity NF | ${nodes} | ${dut}
-| | ... | nf_chains=${nf_chains} | nf_nodes=${nf_nodes}
-| | ... | nf_chain=${nf_chain} | nf_node=${nf_node}
-| | ... | vs_dtc=${cpu_count_int} | nf_dtc=${nf_dtc} | nf_dtcr=${nf_dtcr}
-| | &{cont_args}= | Create Dictionary
-| | | ... | name=${dut}_${container_group}${nf_id}${DUT1_UUID}
-| | | ... | node=${nodes['${dut}']} | mnt=${mnt} | env=${env}
-| | Run Keyword If | ${pinning}
-| | ... | Set To Dictionary | ${cont_args} | cpuset_cpus=${nf_cpus}
-| | Run Keyword | ${container_group}.Construct container | &{cont_args}
+| | ${nf_id}= | Evaluate | (${nf_chain} - ${1}) * ${nf_nodes} + ${nf_node}
+| | ${env}= | Create List | DEBIAN_FRONTEND=noninteractive
+| | ${dut1_uuid_length} = | Get Length | ${DUT1_UUID}
+| | ${root}= | Run Keyword If | ${dut1_uuid_length}
+| | ... | Get Docker Mergeddir | ${nodes['DUT1']} | ${DUT1_UUID}
+| | ... | ELSE | Set Variable | ${EMPTY}
+| | ${node_arch}= | Get Node Arch | ${nodes['${dut}']}
+| | ${name}= | Set Variable | ${dut}_${container_group}${nf_id}${DUT1_UUID}
+| | ${mnt}= | Create List
+| | ... | ${root}/tmp/:/mnt/host/
+| | ... | ${root}/tmp/vpp_sockets/${name}/:/run/vpp/
+| | ... | ${root}/dev/vfio/:/dev/vfio/
+| | ... | ${root}/usr/bin/vpp:/usr/bin/vpp
+| | ... | ${root}/usr/bin/vppctl:/usr/bin/vppctl
+| | ... | ${root}/usr/lib/${node_arch}-linux-gnu/:/usr/lib/${node_arch}-linux-gnu/
+| | ... | ${root}/usr/share/vpp/:/usr/share/vpp/
+| | ${nf_cpus}= | Set Variable | ${None}
+| | ${nf_cpus}= | Run Keyword If | ${pinning}
+| | ... | Get Affinity NF | ${nodes} | ${dut}
+| | ... | nf_chains=${nf_chains} | nf_nodes=${nf_nodes}
+| | ... | nf_chain=${nf_chain} | nf_node=${nf_node}
+| | ... | vs_dtc=${cpu_count_int} | nf_dtc=${nf_dtc} | nf_dtcr=${nf_dtcr}
+| | &{cont_args}= | Create Dictionary
+| | ... | name=${name} | node=${nodes['${dut}']} | mnt=${mnt} | env=${env}
+| | ... | root=${root}
+| | Run Keyword If | ${pinning}
+| | ... | Set To Dictionary | ${cont_args} | cpuset_cpus=${nf_cpus}
+| | Run Keyword | ${container_group}.Construct container | &{cont_args}
 
-| Construct chain of containers on all DUTs
-| | [Documentation] | Construct 1 chain of 1..N CNFs on all DUT nodes.
+| Construct chain of containers
+| | [Documentation] | Construct 1 chain of 1..N CNFs on selected/all DUT nodes.
 | | ...
 | | ... | *Arguments:*
+| | ... | - dut: DUT node to start the containers on. Run on all nodes if None.
+| | ... |   Type: string or None
 | | ... | - nf_chains: Total number of chains. Type: integer
 | | ... | - nf_nodes: Total number of nodes per chain. Type: integer
 | | ... | - nf_chain: Chain ID. Type: integer
 | | ...
 | | ... | *Example:*
 | | ...
-| | ... | \| Construct chain of containers on all DUTs \| 1 \| 1 \| 1 \
-| | ... | \| ${True} \|
+| | ... | \| Construct chain of containers \| DUT1 \| 1 \| 1 \| 1 \| ${True} \|
 | | ...
-| | [Arguments] | ${nf_chains}=${1} | ${nf_nodes}=${1} | ${nf_chain}=${1}
-| | ... | ${auto_scale}=${True} | ${pinning}=${True}
+| | [Arguments] | ${dut}=${None} | ${nf_chains}=${1} | ${nf_nodes}=${1}
+| | ... | ${nf_chain}=${1} | ${auto_scale}=${True} | ${pinning}=${True}
 | | ...
 | | :FOR | ${nf_node} | IN RANGE | 1 | ${nf_nodes}+1
-| | | Construct container on all DUTs | nf_chains=${nf_chains}
-| | | ... | nf_nodes=${nf_nodes} | nf_chain=${nf_chain} | nf_node=${nf_node}
-| | | ... | auto_scale=${auto_scale} | pinning=${pinning}
+| | | Run Keyword If | '${dut}' == '${None}'
+| | | ... | Construct container on all DUTs
+| | | ... | nf_chains=${nf_chains} | nf_nodes=${nf_nodes} | nf_chain=${nf_chain}
+| | | ... | nf_node=${nf_node} | auto_scale=${auto_scale} | pinning=${pinning}
+| | | ... | ELSE
+| | | ... | Construct container on DUT | ${dut}
+| | | ... | nf_chains=${nf_chains} | nf_nodes=${nf_nodes} | nf_chain=${nf_chain}
+| | | ... | nf_node=${nf_node} | auto_scale=${auto_scale} | pinning=${pinning}
 
-| Construct chains of containers on all DUTs
-| | [Documentation] | Construct 1..N chains of 1..N CNFs on all DUT nodes.
+| Construct chains of containers
+| | [Documentation] | Construct 1..N chains of 1..N CNFs on selected/all DUT
+| | ... | nodes.
 | | ...
 | | ... | *Arguments:*
+| | ... | - dut: DUT node to start the containers on. Run on all nodes if None.
+| | ... |   Type: string or None
 | | ... | - nf_chains: Total number of chains (Optional). Type: integer, default
 | | ... |   value: ${1}
 | | ... | - nf_nodes: Total number of nodes per chain (Optional). Type: integer,
 | | ...
 | | ... | *Example:*
 | | ...
-| | ... | \| Construct chains of containers on all DUTs \| 1 \| 1 \|
+| | ... | \| Construct chains of containers \| DUT1 \| 1 \| 1 \|
 | | ...
-| | [Arguments] | ${nf_chains}=${1} | ${nf_nodes}=${1} | ${auto_scale}=${True}
-| | ... | ${pinning}=${True}
+| | [Arguments] | ${dut}=${None} | ${nf_chains}=${1} | ${nf_nodes}=${1}
+| | ... | ${auto_scale}=${True} | ${pinning}=${True}
 | | ...
 | | :FOR | ${nf_chain} | IN RANGE | 1 | ${nf_chains}+1
-| | | Construct chain of containers on all DUTs | nf_chains=${nf_chains}
-| | | ... | nf_nodes=${nf_nodes} | nf_chain=${nf_chain}
-| | | ... | auto_scale=${auto_scale} | pinning=${pinning}
+| | | Construct chain of containers
+| | | ... | dut=${dut} | nf_chains=${nf_chains} | nf_nodes=${nf_nodes}
+| | | ... | nf_chain=${nf_chain} | auto_scale=${auto_scale} | pinning=${pinning}
 
 | Acquire all '${group}' containers
 | | [Documentation] | Acquire all container(s) in specific container group on
 | | [Documentation] | Configure VPP on all container(s) in specific container
 | | ... | group on all DUT nodes.
 | | ...
+| | ... | *Test (or broader scope) variables read:*
+| | ... |   - container_chain_topology - Topology type used for configuring CNF
+| | ... |     (VPP) in container. Type: string
+| | ...
 | | ${dut1_if2} = | Get Variable Value | \${dut1_if2} | ${None}
 | | ${dut2_if2} = | Get Variable Value | \${dut2_if2} | ${None}
 | | Run Keyword If | '${container_chain_topology}' == 'chain_ip4'
 | | ... | ${group}.Configure VPP In All Containers | ${container_chain_topology}
 | | ... | tg_if1_mac=${tg_if1_mac} | tg_if2_mac=${tg_if2_mac}
 | | ... | nodes=${nf_nodes}
+| | ... | ELSE IF | '${container_chain_topology}' == 'chain_ipsec'
+| | ... | ${group}.Configure VPP In All Containers | ${container_chain_topology}
+| | ... | tg_if2_ip4=${tg_if2_ip4} | tg_if2_mac=${tg_if2_mac}
+| | ... | dut2_if1_ip4=${dut2_if1_ip4} | dut2_if2_ip4=${dut2_if2_ip4}
+| | ... | raddr_ip4=${raddr_ip4} | nodes=${nodes} | nf_nodes=${nf_nodes}
 | | ... | ELSE IF | '${container_chain_topology}' == 'pipeline_ip4'
 | | ... | ${group}.Configure VPP In All Containers | ${container_chain_topology}
 | | ... | tg_if1_mac=${tg_if1_mac} | tg_if2_mac=${tg_if2_mac}
 | | ... | Start containers for test.
 | | ...
 | | ... | *Arguments:*
+| | ... | - dut: DUT node to start the containers on. Run on all nodes if None.
+| | ... |   Type: string or None
 | | ... | - nf_chains: Total number of chains. Type: integer
 | | ... | - nf_nodes: Total number of nodes per chain. Type: integer
 | | ... | - auto_scale - If True, use same amount of Dataplane threads for
 | | ...
 | | ... | \| Start containers for test \| 1 \| 1 \|
 | | ...
-| | [Arguments] | ${nf_chains}=${1} | ${nf_nodes}=${1} | ${auto_scale}=${True}
-| | ... | ${pinning}=${True}
+| | [Arguments] | ${dut}=${None} | ${nf_chains}=${1} | ${nf_nodes}=${1}
+| | ... | ${auto_scale}=${True} | ${pinning}=${True}
 | | ...
 | | Set Test Variable | @{container_groups} | @{EMPTY}
 | | Set Test Variable | ${container_group} | CNF
 | | Set Test Variable | ${nf_nodes}
 | | Import Library | resources.libraries.python.ContainerUtils.ContainerManager
 | | ... | engine=${container_engine} | WITH NAME | ${container_group}
-| | Construct chains of containers on all DUTs | ${nf_chains} | ${nf_nodes}
+| | Construct chains of containers
+| | ... | dut=${dut} | nf_chains=${nf_chains} | nf_nodes=${nf_nodes}
 | | ... | auto_scale=${auto_scale} | pinning=${pinning}
 | | Acquire all '${container_group}' containers
 | | Create all '${container_group}' containers
diff --git a/resources/libraries/robot/shared/counters.robot b/resources/libraries/robot/shared/counters.robot
deleted file mode 100644
index 2746a1a..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-| Library | resources.libraries.python.VppCounters
-| ...
-| Documentation | VPP counters keywords
-
-*** Keywords ***
-| Clear all counters on all DUTs
-| | [Documentation] | Clear runtime, interface, hardware and error counters
-| | ... | on all DUTs with VPP instance
-| | Clear runtime counters on all DUTs | ${nodes}
-| | Clear interface counters on all DUTs | ${nodes}
-| | Clear hardware counters on all DUTs | ${nodes}
-| | Clear error counters on all DUTs | ${nodes}
index fd7ceeb..3da2026 100644
@@ -52,7 +52,6 @@
 | Resource | resources/libraries/robot/performance/performance_configuration.robot
 | Resource | resources/libraries/robot/performance/performance_limits.robot
 | Resource | resources/libraries/robot/performance/performance_utils.robot
-| Resource | resources/libraries/robot/shared/counters.robot
 | Resource | resources/libraries/robot/shared/interfaces.robot
 | Resource | resources/libraries/robot/shared/container.robot
 | Resource | resources/libraries/robot/shared/memif.robot
 | | | Run keyword | ${dut}.Add Unix CLI Listen
 | | | Run keyword | ${dut}.Add Unix Nodaemon
 | | | Run keyword | ${dut}.Add Unix Coredump
-| | | Run keyword | ${dut}.Add Socksvr
+| | | Run keyword | ${dut}.Add Socksvr | ${SOCKSVR_PATH}
 | | | Run keyword | ${dut}.Add DPDK No Tx Checksum Offload
 | | | Run keyword | ${dut}.Add DPDK Log Level | debug
 | | | Run keyword | ${dut}.Add DPDK Uio Driver
 | | ...
 | | :FOR | ${dut} | IN | @{duts}
 | | | Run keyword | ${dut}.Apply Config
+| | | Add New Socket | ${nodes['${dut}']} | PAPI | ${dut} | ${SOCKSVR_PATH}
+| | | Add New Socket | ${nodes['${dut}']} | STATS | ${dut} | ${SOCKSTAT_PATH}
 | | Save VPP PIDs
 | | Enable Coredump Limit VPP on All DUTs | ${nodes}
 | | Update All Interface Data On All Nodes | ${nodes} | skip_tg=${True}
 | | All TGs Set Interface Default Driver | ${nodes}
 | | Update All Interface Data On All Nodes | ${nodes}
 | | Reset PAPI History On All DUTs | ${nodes}
+| | ${duts}= | Get Matches | ${nodes} | DUT*
+| | :FOR | ${dut} | IN | @{duts}
+| | | Add New Socket | ${nodes['${dut}']} | PAPI | ${dut} | ${SOCKSVR_PATH}
+| | | Add New Socket | ${nodes['${dut}']} | STATS | ${dut} | ${SOCKSTAT_PATH}
 
 | Tear down functional test
 | | [Documentation] | Common test teardown for functional tests.
 | | Show PAPI History On All DUTs | ${nodes}
 | | Vpp Show Errors On All DUTs | ${nodes}
 | | Verify VPP PID in Teardown
+| | Clean Sockets On All Nodes | ${nodes}
 
 | Tear down LISP functional test
 | | [Documentation] | Common test teardown for functional tests with LISP.
 | | Show Vpp Settings | ${nodes['DUT2']}
 | | Vpp Show Errors On All DUTs | ${nodes}
 | | Verify VPP PID in Teardown
+| | Clean Sockets On All Nodes | ${nodes}
index 5d2c2fa..8ba914b 100644
@@ -39,6 +39,7 @@
 | | ... | Verify VPP PID in Teardown
 | | :FOR | ${action} | IN | @{actions}
 | | | Run Keyword | Additional Test Tear Down Action For ${action}
+| | Clean Sockets On All Nodes | ${nodes}
 
 | Additional Test Tear Down Action For performance
 | | [Documentation]
index 9a0728a..dc3cc05 100644
@@ -21,8 +21,6 @@
 | Library | resources.libraries.python.topology.Topology
 | Library | resources.libraries.python.TrafficScriptExecutor
 | ...
-| Resource | resources/libraries/robot/shared/counters.robot
-| ...
 | Documentation | Traffic keywords
 
 *** Keywords ***