-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from resources.libraries.python.Constants import Constants
from resources.libraries.python.DUTSetup import DUTSetup
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
-from resources.libraries.python.ssh import exec_cmd_no_error
-from resources.libraries.python.topology import NodeType
+from resources.libraries.python.model.ExportResult import (
+ export_dut_type_and_version
+)
+from resources.libraries.python.ssh import exec_cmd_no_error, exec_cmd
+from resources.libraries.python.topology import Topology, SocketType, NodeType
-class VPPUtil(object):
+class VPPUtil:
"""General class for any VPP related methods/functions."""
@staticmethod
- def show_vpp_settings(node, *additional_cmds):
- """Print default VPP settings. In case others are needed, can be
- accepted as next parameters (each setting one parameter), preferably
- in form of a string.
-
- :param node: VPP node.
- :param additional_cmds: Additional commands that the vpp should print
- settings for.
- :type node: dict
- :type additional_cmds: tuple
- """
- def_setting_tb_displayed = {
- 'IPv6 FIB': 'ip6 fib',
- 'IPv4 FIB': 'ip fib',
- 'Interface IP': 'int addr',
- 'Interfaces': 'int',
- 'ARP': 'ip arp',
- 'Errors': 'err'
- }
-
- if additional_cmds:
- for cmd in additional_cmds:
- def_setting_tb_displayed['Custom Setting: {}'.format(cmd)] = cmd
-
- for _, cmd in def_setting_tb_displayed.items():
- command = 'vppctl sh {cmd}'.format(cmd=cmd)
- exec_cmd_no_error(node, command, timeout=30, sudo=True)
-
- @staticmethod
- def restart_vpp_service(node):
+ def restart_vpp_service(node, node_key=None):
"""Restart VPP service on the specified topology node.
+ Disconnect possibly connected PAPI executor.
+
:param node: Topology node.
+ :param node_key: Topology node key.
:type node: dict
+ :type node_key: str
"""
- DUTSetup.restart_service(node, Constants.VPP_UNIT)
+    # Containers have a separate lifecycle, but it is better to be safe.
+ PapiSocketExecutor.disconnect_all_sockets_by_node(node)
+
+ VPPUtil.stop_vpp_service(node)
+ command = "/usr/bin/vpp -c /etc/vpp/startup.conf"
+ message = f"Node {node[u'host']} failed to start VPP!"
+ exec_cmd_no_error(
+ node, command, timeout=180, sudo=True, message=message
+ )
+
+ if node_key:
+ Topology.add_new_socket(
+ node, SocketType.CLI, node_key, Constants.SOCKCLI_PATH)
+ Topology.add_new_socket(
+ node, SocketType.PAPI, node_key, Constants.SOCKSVR_PATH)
+ Topology.add_new_socket(
+ node, SocketType.STATS, node_key, Constants.SOCKSTAT_PATH)
@staticmethod
def restart_vpp_service_on_all_duts(nodes):
:param nodes: Topology nodes.
:type nodes: dict
"""
- for node in nodes.values():
- if node['type'] == NodeType.DUT:
- VPPUtil.restart_vpp_service(node)
+ for node_key, node in nodes.items():
+ if node[u"type"] == NodeType.DUT:
+ VPPUtil.restart_vpp_service(node, node_key)
@staticmethod
- def stop_vpp_service(node):
+ def stop_vpp_service(node, node_key=None):
"""Stop VPP service on the specified topology node.
+ Disconnect possibly connected PAPI executor.
+
:param node: Topology node.
+ :param node_key: Topology node key.
:type node: dict
+ :type node_key: str
"""
- DUTSetup.stop_service(node, Constants.VPP_UNIT)
+ PapiSocketExecutor.disconnect_all_sockets_by_node(node)
+ command = "pkill -9 vpp; sleep 1"
+ exec_cmd(node, command, timeout=180, sudo=True)
+ command = (
+ "/bin/rm -f /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api"
+ )
+ exec_cmd(node, command, timeout=180, sudo=True)
+
+ if node_key:
+ if Topology.get_node_sockets(node, socket_type=SocketType.PAPI):
+ Topology.del_node_socket_id(node, SocketType.PAPI, node_key)
+ if Topology.get_node_sockets(node, socket_type=SocketType.STATS):
+ Topology.del_node_socket_id(node, SocketType.STATS, node_key)
@staticmethod
def stop_vpp_service_on_all_duts(nodes):
:param nodes: Topology nodes.
:type nodes: dict
"""
+ for node_key, node in nodes.items():
+ if node[u"type"] == NodeType.DUT:
+ VPPUtil.stop_vpp_service(node, node_key)
+
+ @staticmethod
+ def install_vpp_on_all_duts(nodes, vpp_pkg_dir):
+ """Install VPP on all DUT nodes.
+
+ :param nodes: Nodes in the topology.
+ :param vpp_pkg_dir: Path to directory where VPP packages are stored.
+ :type nodes: dict
+ :type vpp_pkg_dir: str
+ """
+ VPPUtil.stop_vpp_service_on_all_duts(nodes)
for node in nodes.values():
- if node['type'] == NodeType.DUT:
- VPPUtil.stop_vpp_service(node)
+ message = f"Failed to install VPP on host {node['host']}!"
+ if node["type"] == NodeType.DUT:
+ command = "mkdir -p /var/log/vpp/"
+ exec_cmd(node, command, sudo=True)
+
+ command = "ln -s /dev/null /etc/systemd/system/vpp.service"
+ exec_cmd(node, command, sudo=True)
+
+ command = "ln -s /dev/null /etc/sysctl.d/80-vpp.conf"
+ exec_cmd(node, command, sudo=True)
+
+ command = "apt-get purge -y '*vpp*' || true"
+ exec_cmd_no_error(node, command, timeout=120, sudo=True)
+
+ command = f"dpkg -i --force-all {vpp_pkg_dir}*.deb"
+ exec_cmd_no_error(
+ node, command, timeout=120, sudo=True, message=message
+ )
+
+ command = "dpkg -l | grep vpp"
+ exec_cmd_no_error(node, command, sudo=True)
@staticmethod
def verify_vpp_installed(node):
:param node: Topology node.
:type node: dict
"""
- cmd = 'command -v vpp'
+ DUTSetup.verify_program_installed(node, u"vpp")
+
+ @staticmethod
+ def adjust_privileges(node):
+ """Adjust privileges to control VPP without sudo.
+
+ :param node: Topology node.
+ :type node: dict
+ """
+ cmd = u"chmod -R o+rwx /run/vpp"
exec_cmd_no_error(
- node, cmd, message='VPP is not installed!')
+ node, cmd, sudo=True, message=u"Failed to adjust privileges!",
+ retries=120)
@staticmethod
def verify_vpp_started(node):
:param node: Topology node.
:type node: dict
"""
- cmd = 'echo "show pci" | sudo socat - UNIX-CONNECT:/run/vpp/cli.sock'
+ cmd = u"echo \"show pci\" | sudo socat - UNIX-CONNECT:/run/vpp/cli.sock"
exec_cmd_no_error(
- node, cmd, sudo=False, message='VPP failed to start!', retries=120)
+ node, cmd, sudo=False, message=u"VPP failed to start!", retries=120
+ )
- cmd = ('vppctl show pci 2>&1 | '
- 'fgrep -v "Connection refused" | '
- 'fgrep -v "No such file or directory"')
+ cmd = u"vppctl show pci 2>&1 | fgrep -v \"Connection refused\" | " \
+ u"fgrep -v \"No such file or directory\""
exec_cmd_no_error(
- node, cmd, sudo=True, message='VPP failed to start!', retries=120)
+ node, cmd, sudo=True, message=u"VPP failed to start!", retries=120
+ )
+
+    # Properly enable cards in case they were disabled. This will be
+    # followed up in https://jira.fd.io/browse/VPP-1934.
+ cmd = u"for i in $(sudo vppctl sho int | grep Eth | cut -d' ' -f1); do"\
+ u" sudo vppctl set int sta $i up; done"
+ exec_cmd(node, cmd, sudo=False)
@staticmethod
def verify_vpp(node):
"""Verify that VPP is installed and started on the specified topology
- node.
+ node. Adjust privileges so user can connect without sudo.
:param node: Topology node.
:type node: dict
:raises RuntimeError: If VPP service fails to start.
"""
- VPPUtil.verify_vpp_installed(node)
+ DUTSetup.verify_program_installed(node, 'vpp')
try:
- # Verify responsivness of vppctl.
+ # Verify responsiveness of vppctl.
VPPUtil.verify_vpp_started(node)
- # Verify responsivness of PAPI.
+ # Adjust privileges.
+ VPPUtil.adjust_privileges(node)
+ # Verify responsiveness of PAPI.
VPPUtil.show_log(node)
VPPUtil.vpp_show_version(node)
finally:
:type nodes: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
VPPUtil.verify_vpp(node)
@staticmethod
- def vpp_show_version(node, verbose=True):
+ def vpp_show_version(
+ node, remote_vpp_socket=Constants.SOCKSVR_PATH, log=True):
"""Run "show_version" PAPI command.
+    Socket is configurable, so VPP running inside a container can be accessed.
+ The result is exported to JSON UTI output as "dut-version".
+
:param node: Node to run command on.
- :param verbose: Show version, compile date and compile location if True
- otherwise show only version.
+ :param remote_vpp_socket: Path to remote socket to target VPP.
+    :param log: If True, show the result in Robot log.
:type node: dict
- :type verbose: bool
+ :type remote_vpp_socket: str
+ :type log: bool
:returns: VPP version.
:rtype: str
+ :raises RuntimeError: If PAPI connection fails.
+ :raises AssertionError: If PAPI retcode is nonzero.
"""
- cmd = 'show_version'
- with PapiSocketExecutor(node) as papi_exec:
+ cmd = u"show_version"
+ with PapiSocketExecutor(node, remote_vpp_socket) as papi_exec:
reply = papi_exec.add(cmd).get_reply()
- return_version = reply['version'].rstrip('\0x00')
- version = 'VPP version: {ver}\n'.format(ver=return_version)
- if verbose:
- version += ('Compile date: {date}\n'
- 'Compile location: {cl}\n'.
- format(date=reply['build_date'].rstrip('\0x00'),
- cl=reply['build_directory'].rstrip('\0x00')))
- logger.info(version)
- return return_version
+ if log:
+ logger.info(f"VPP version: {reply[u'version']}\n")
+ version = f"{reply[u'version']}"
+ export_dut_type_and_version(u"VPP", version)
+ return version
@staticmethod
def show_vpp_version_on_all_duts(nodes):
:type nodes: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
VPPUtil.vpp_show_version(node)
@staticmethod
:type node: dict
"""
- cmd = 'sw_interface_dump'
+ cmd = u"sw_interface_dump"
args = dict(
name_filter_valid=False,
- name_filter=''
+ name_filter=u""
)
- err_msg = 'Failed to get interface dump on host {host}'.format(
- host=node['host'])
+ err_msg = f"Failed to get interface dump on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd, **args).get_details(err_msg)
for if_dump in details:
- if_dump['l2_address'] = str(if_dump['l2_address'])
- if_dump['b_dmac'] = str(if_dump['b_dmac'])
- if_dump['b_smac'] = str(if_dump['b_smac'])
- if_dump['flags'] = if_dump['flags'].value
- if_dump['type'] = if_dump['type'].value
- if_dump['link_duplex'] = if_dump['link_duplex'].value
- if_dump['sub_if_flags'] = if_dump['sub_if_flags'].value \
- if hasattr(if_dump['sub_if_flags'], 'value') \
- else int(if_dump['sub_if_flags'])
+ if_dump[u"l2_address"] = str(if_dump[u"l2_address"])
+ if_dump[u"b_dmac"] = str(if_dump[u"b_dmac"])
+ if_dump[u"b_smac"] = str(if_dump[u"b_smac"])
+ if_dump[u"flags"] = if_dump[u"flags"].value
+ if_dump[u"type"] = if_dump[u"type"].value
+ if_dump[u"link_duplex"] = if_dump[u"link_duplex"].value
+ if_dump[u"sub_if_flags"] = if_dump[u"sub_if_flags"].value \
+ if hasattr(if_dump[u"sub_if_flags"], u"value") \
+ else int(if_dump[u"sub_if_flags"])
# TODO: return only base data
- logger.trace('Interface data of host {host}:\n{details}'.format(
- host=node['host'], details=details))
+ logger.trace(f"Interface data of host {node[u'host']}:\n{details}")
@staticmethod
def vpp_enable_traces_on_dut(node, fail_on_error=False):
:type fail_on_error: bool
"""
cmds = [
- "trace add dpdk-input 50",
- "trace add vhost-user-input 50",
- "trace add memif-input 50",
- "trace add avf-input 50"
+ u"trace add dpdk-input 50",
+ u"trace add vhost-user-input 50",
+ u"trace add memif-input 50",
+ u"trace add avf-input 50"
]
for cmd in cmds:
try:
- PapiSocketExecutor.run_cli_cmd(node, cmd)
+ PapiSocketExecutor.run_cli_cmd_on_all_sockets(node, cmd)
except AssertionError:
if fail_on_error:
raise
:type fail_on_error: bool
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
VPPUtil.vpp_enable_traces_on_dut(node, fail_on_error)
@staticmethod
- def vpp_enable_elog_traces_on_dut(node):
- """Enable API/CLI/Barrier traces on the DUT node.
+ def vpp_enable_elog_traces(node):
+ """Enable API/CLI/Barrier traces on the specified topology node.
- :param node: DUT node to set up.
+ :param node: Topology node.
:type node: dict
"""
- PapiSocketExecutor.run_cli_cmd(node, "elog trace api cli barrier")
+ try:
+ PapiSocketExecutor.run_cli_cmd_on_all_sockets(
+ node, u"event-logger trace api cli barrier")
+ except AssertionError:
+ # Perhaps an older VPP build is tested.
+ PapiSocketExecutor.run_cli_cmd_on_all_sockets(
+ node, u"elog trace api cli barrier")
@staticmethod
def vpp_enable_elog_traces_on_all_duts(nodes):
:type nodes: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
- VPPUtil.vpp_enable_elog_traces_on_dut(node)
+ if node[u"type"] == NodeType.DUT:
+ VPPUtil.vpp_enable_elog_traces(node)
@staticmethod
- def show_event_logger_on_dut(node):
- """Show event logger on the DUT node.
+ def show_event_logger(node):
+ """Show event logger on the specified topology node.
- :param node: DUT node to show traces on.
+ :param node: Topology node.
:type node: dict
"""
- PapiSocketExecutor.run_cli_cmd(node, "show event-logger")
+ PapiSocketExecutor.run_cli_cmd_on_all_sockets(
+ node, u"show event-logger")
@staticmethod
def show_event_logger_on_all_duts(nodes):
:type nodes: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
- VPPUtil.show_event_logger_on_dut(node)
+ if node[u"type"] == NodeType.DUT:
+ VPPUtil.show_event_logger(node)
@staticmethod
def show_log(node):
- """Show log on the specified topology node.
+ """Show logging on the specified topology node.
:param node: Topology node.
:type node: dict
- :returns: VPP log data.
- :rtype: list
"""
- return PapiSocketExecutor.run_cli_cmd(node, "show log")
+ PapiSocketExecutor.run_cli_cmd(node, u"show logging")
+
+ @staticmethod
+ def show_log_on_all_duts(nodes):
+ """Show logging on all DUTs in the given topology.
+
+ :param nodes: Nodes in the topology.
+ :type nodes: dict
+ """
+ for node in nodes.values():
+ if node[u"type"] == NodeType.DUT:
+ VPPUtil.show_log(node)
@staticmethod
def vpp_show_threads(node):
:returns: VPP thread data.
:rtype: list
"""
- cmd = 'show_threads'
+ cmd = u"show_threads"
with PapiSocketExecutor(node) as papi_exec:
reply = papi_exec.add(cmd).get_reply()
- threads_data = list()
- for thread in reply["thread_data"]:
- thread_data = list()
- for item in thread:
- if isinstance(item, unicode):
- item = item.rstrip('\x00')
- thread_data.append(item)
- threads_data.append(thread_data)
-
- logger.info("show threads:\n{threads}".format(threads=threads_data))
+ threads_data = reply[u"thread_data"]
+ logger.trace(f"show threads:\n{threads_data}")
return threads_data
+
+ @staticmethod
+ def vpp_add_graph_node_next(node, graph_node_name, graph_next_name):
+ """Set the next node for a given node.
+
+ :param node: Node to run command on.
+ :param graph_node_name: Graph node to add the next node on.
+ :param graph_next_name: Graph node to add as the next node.
+ :type node: dict
+ :type graph_node_name: str
+ :type graph_next_name: str
+ :returns: The index of the next graph node.
+ :rtype: int
+ """
+ cmd = u"add_node_next"
+ args = dict(
+ node_name=graph_node_name,
+ next_name=graph_next_name
+ )
+ with PapiSocketExecutor(node) as papi_exec:
+ reply = papi_exec.add(cmd, **args).get_reply()
+
+ return reply[u"next_index"]
+
+ @staticmethod
+ def vpp_set_neighbor_limit_on_all_duts(nodes, count):
+ """VPP set neighbor count limit on all DUTs in the given topology.
+
+ :param nodes: Nodes in the topology.
+    :param count: Neighbor count to set.
+ :type nodes: dict
+ :type count: int
+ """
+ for node in nodes.values():
+ if node[u"type"] == NodeType.DUT:
+ cmd = f"set ip neighbor-config ip4 limit {count}"
+ PapiSocketExecutor.run_cli_cmd(node, cmd)
+
+ cmd = f"set ip neighbor-config ip6 limit {count}"
+ PapiSocketExecutor.run_cli_cmd(node, cmd)