X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Flibraries%2Fpython%2FInterfaceUtil.py;h=4cd7cf10b69b0d925c2030ed5e9daa982d6fb43d;hp=3e2e38ef811e7f58ee9bc1704900b0123d5dc2bb;hb=7d849ba64e10b8a7678845ee1dcc091e125dd124;hpb=6bcf4d40d83bbf026f9fd0105bebf579423c65a6

diff --git a/resources/libraries/python/InterfaceUtil.py b/resources/libraries/python/InterfaceUtil.py
index 3e2e38ef81..4cd7cf10b6 100644
--- a/resources/libraries/python/InterfaceUtil.py
+++ b/resources/libraries/python/InterfaceUtil.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -20,12 +20,12 @@ from ipaddress import ip_address
 from robot.api import logger
 
 from resources.libraries.python.Constants import Constants
-from resources.libraries.python.CpuUtils import CpuUtils
 from resources.libraries.python.DUTSetup import DUTSetup
+from resources.libraries.python.IPAddress import IPAddress
 from resources.libraries.python.L2Util import L2Util
 from resources.libraries.python.PapiExecutor import PapiSocketExecutor
 from resources.libraries.python.parsers.JsonParser import JsonParser
-from resources.libraries.python.ssh import SSH, exec_cmd_no_error
+from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
 from resources.libraries.python.topology import NodeType, Topology
 from resources.libraries.python.VPPUtil import VPPUtil
 
@@ -110,6 +110,13 @@ class RdmaMode(IntEnum):
     RDMA_API_MODE_DV = 2
 
 
+class AfXdpMode(IntEnum):
+    """AF_XDP interface mode."""
+    AF_XDP_API_MODE_AUTO = 0
+    AF_XDP_API_MODE_COPY = 1
+    AF_XDP_API_MODE_ZERO_COPY = 2
+
+
 class InterfaceUtil:
     """General utilities for managing interfaces"""
 
@@ -228,45 +235,97 @@ class InterfaceUtil:
         )
 
     @staticmethod
-    def set_interface_ethernet_mtu(node, iface_key, mtu):
-        """Set Ethernet MTU for specified interface.
+    def set_interface_state_pci(
+            node, pf_pcis, namespace=None, state=u"up"):
+        """Set operational state for interfaces specified by PCI addresses.
+
+        :param node: Topology node.
+        :param pf_pcis: List of node's interfaces PCI addresses.
+        :param namespace: Exec command in namespace. (Optional, Default: None)
+        :param state: Up/Down. (Optional, Default: up)
+        :type node: dict
+        :type pf_pcis: list
+        :type namespace: str
+        :type state: str
+        """
+        for pf_pci in pf_pcis:
+            pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
+            InterfaceUtil.set_linux_interface_state(
+                node, pf_eth, namespace=namespace, state=state
+            )
 
-        Function can be used only for TGs.
+    @staticmethod
+    def set_interface_mtu(node, pf_pcis, mtu=9200):
+        """Set Ethernet MTU for specified interfaces.
 
-        :param node: Node where the interface is.
-        :param iface_key: Interface key from topology file.
-        :param mtu: MTU to set.
-        :type node: dict
-        :type iface_key: str
+        :param node: Topology node.
+        :param pf_pcis: List of node's interfaces PCI addresses.
+        :param mtu: MTU to set. Default: 9200.
+        :type node: dict
+        :type pf_pcis: list
         :type mtu: int
-        :returns: Nothing.
-        :raises ValueError: If the node type is "DUT".
-        :raises ValueError: If the node has an unknown node type.
+        :raises RuntimeError: If failed to set MTU on interface.
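+
+        Example (editorial sketch; the PCI addresses are placeholders, not
+        values from a real topology file)::
+
+            InterfaceUtil.set_interface_mtu(
+                node, [u"0000:17:00.0", u"0000:17:00.1"], mtu=1800
+            )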
""" - if node[u"type"] == NodeType.DUT: - msg = f"Node {node[u'host']}: Setting Ethernet MTU for interface " \ - f"on DUT nodes not supported" - elif node[u"type"] != NodeType.TG: - msg = f"Node {node[u'host']} has unknown NodeType: {node[u'type']}" - else: - iface_name = Topology.get_interface_name(node, iface_key) - cmd = f"ip link set {iface_name} mtu {mtu}" + for pf_pci in pf_pcis: + pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci) + cmd = f"ip link set {pf_eth} mtu {mtu}" exec_cmd_no_error(node, cmd, sudo=True) - return - raise ValueError(msg) @staticmethod - def set_default_ethernet_mtu_on_all_interfaces_on_node(node): - """Set default Ethernet MTU on all interfaces on node. + def set_interface_channels( + node, pf_pcis, num_queues=1, channel=u"combined"): + """Set interface channels for specified interfaces. + + :param node: Topology node. + :param pf_pcis: List of node's interfaces PCI addresses. + :param num_queues: Number of channels. (Optional, Default: 1) + :param channel: Channel type. (Optional, Default: combined) + :type nodes: dict + :type pf_pcis: list + :type num_queues: int + :type channel: str + """ + for pf_pci in pf_pcis: + pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci) + cmd = f"ethtool --set-channels {pf_eth} {channel} {num_queues}" + exec_cmd_no_error(node, cmd, sudo=True) - Function can be used only for TGs. + @staticmethod + def set_interface_flow_control(node, pf_pcis, rxf=u"off", txf=u"off"): + """Set Ethernet flow control for specified interfaces. - :param node: Node where to set default MTU. - :type node: dict - :returns: Nothing. + :param node: Topology node. + :param pf_pcis: List of node's interfaces PCI addresses. + :param rxf: RX flow. (Optional, Default: off). + :param txf: TX flow. (Optional, Default: off). + :type nodes: dict + :type pf_pcis: list + :type rxf: str + :type txf: str """ - for ifc in node[u"interfaces"]: - InterfaceUtil.set_interface_ethernet_mtu(node, ifc, 1500) + for pf_pci in pf_pcis: + pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci) + cmd = f"ethtool -A {pf_eth} rx {rxf} tx {txf}" + ret_code, _, _ = exec_cmd(node, cmd, sudo=True) + if int(ret_code) not in (0, 78): + raise RuntimeError("Failed to set flow control on {pf_eth}!") + + @staticmethod + def set_pci_parameter(node, pf_pcis, key, value): + """Set PCI parameter for specified interfaces. + + :param node: Topology node. + :param pf_pcis: List of node's interfaces PCI addresses. + :param key: Key to set. + :param value: Value to set. + :type nodes: dict + :type pf_pcis: list + :type key: str + :type value: str + """ + for pf_pci in pf_pcis: + cmd = f"setpci -s {pf_pci} {key}={value}" + exec_cmd_no_error(node, cmd, sudo=True) @staticmethod def vpp_set_interface_mtu(node, interface, mtu=9200): @@ -294,8 +353,7 @@ class InterfaceUtil: with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) except AssertionError as err: - # TODO: Make failure tolerance optional. - logger.debug(f"Setting MTU failed. Expected?\n{err}") + logger.debug(f"Setting MTU failed.\n{err}") @staticmethod def vpp_set_interfaces_mtu_on_node(node, mtu=9200): @@ -491,6 +549,27 @@ class InterfaceUtil: return if_data.get(u"l2_address") + @staticmethod + def vpp_set_interface_mac(node, interface, mac): + """Set MAC address for the given interface. + + :param node: VPP node to set interface MAC. + :param interface: Numeric index or name string of a specific interface. + :param mac: Required MAC address. 
+        :type node: dict
+        :type interface: int or str
+        :type mac: str
+        """
+        cmd = u"sw_interface_set_mac_address"
+        args = dict(
+            sw_if_index=InterfaceUtil.get_interface_index(node, interface),
+            mac_address=L2Util.mac_to_bin(mac)
+        )
+        err_msg = f"Failed to set MAC address of interface {interface} " \
+            f"on host {node[u'host']}"
+        with PapiSocketExecutor(node) as papi_exec:
+            papi_exec.add(cmd, **args).get_reply(err_msg)
+
     @staticmethod
     def tg_set_interface_driver(node, pci_addr, driver):
         """Set interface driver on the TG node.
@@ -674,14 +753,6 @@ class InterfaceUtil:
         :raises ValueError: If numa node is less than 0.
         :raises RuntimeError: If update of numa node failed.
         """
-        def check_cpu_node_count(node_n, val):
-            val = int(val)
-            if val < 0:
-                if CpuUtils.cpu_node_count(node_n) == 1:
-                    val = 0
-                else:
-                    raise ValueError
-            return val
         ssh = SSH()
         for if_key in Topology.get_node_interfaces(node):
             if_pci = Topology.get_interface_pci_addr(node, if_key)
@@ -691,7 +762,7 @@ class InterfaceUtil:
             ret, out, _ = ssh.exec_command(cmd)
             if ret == 0:
                 try:
-                    numa_node = check_cpu_node_count(node, out)
+                    numa_node = 0 if int(out) < 0 else int(out)
                 except ValueError:
                     logger.trace(
                         f"Reading numa location failed for: {if_pci}"
@@ -778,16 +849,16 @@ class InterfaceUtil:
         :raises RuntimeError: if it is unable to create VxLAN interface on
             the node.
         """
-        src_address = ip_address(source_ip)
-        dst_address = ip_address(destination_ip)
-
         cmd = u"vxlan_add_del_tunnel"
         args = dict(
-            is_add=1,
-            is_ipv6=1 if src_address.version == 6 else 0,
+            is_add=True,
             instance=Constants.BITWISE_NON_ZERO,
-            src_address=src_address.packed,
-            dst_address=dst_address.packed,
+            src_address=IPAddress.create_ip_address_object(
+                ip_address(source_ip)
+            ),
+            dst_address=IPAddress.create_ip_address_object(
+                ip_address(destination_ip)
+            ),
             mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
             encap_vrf_id=0,
             decap_next_index=Constants.BITWISE_NON_ZERO,
@@ -825,9 +896,9 @@ class InterfaceUtil:
 
         cmd = u"sw_interface_set_vxlan_bypass"
         args = dict(
-            is_ipv6=0,
+            is_ipv6=False,
             sw_if_index=sw_if_index,
-            enable=1
+            enable=True
         )
         err_msg = f"Failed to set VXLAN bypass on interface " \
             f"on host {node[u'host']}"
@@ -857,16 +928,8 @@ class InterfaceUtil:
         :returns: Processed vxlan interface dump.
         :rtype: dict
         """
-            if vxlan_dump[u"is_ipv6"]:
-                vxlan_dump[u"src_address"] = \
-                    ip_address(vxlan_dump[u"src_address"])
-                vxlan_dump[u"dst_address"] = \
-                    ip_address(vxlan_dump[u"dst_address"])
-            else:
-                vxlan_dump[u"src_address"] = \
-                    ip_address(vxlan_dump[u"src_address"][0:4])
-                vxlan_dump[u"dst_address"] = \
-                    ip_address(vxlan_dump[u"dst_address"][0:4])
+            vxlan_dump[u"src_address"] = str(vxlan_dump[u"src_address"])
+            vxlan_dump[u"dst_address"] = str(vxlan_dump[u"dst_address"])
             return vxlan_dump
 
         if interface is not None:
@@ -997,6 +1060,49 @@ class InterfaceUtil:
 
         return ifc_name, sw_if_index
 
+    @staticmethod
+    def create_gtpu_tunnel_interface(node, teid, source_ip, destination_ip):
+        """Create GTPU interface and return sw if index of created interface.
+
+        :param node: Node where to create GTPU interface.
+        :param teid: GTPU Tunnel Endpoint Identifier.
+        :param source_ip: Source IP of a GTPU Tunnel End Point.
+        :param destination_ip: Destination IP of a GTPU Tunnel End Point.
+        :type node: dict
+        :type teid: int
+        :type source_ip: str
+        :type destination_ip: str
+        :returns: SW IF INDEX of created interface.
+        :rtype: int
+        :raises RuntimeError: if it is unable to create GTPU interface on the
+            node.
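+
+        Example (editorial sketch; the TEID and tunnel endpoint addresses
+        are illustrative placeholders)::
+
+            sw_if_index = InterfaceUtil.create_gtpu_tunnel_interface(
+                node, teid=10, source_ip=u"192.168.0.1",
+                destination_ip=u"192.168.0.2"
+            )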
+ """ + cmd = u"gtpu_add_del_tunnel" + args = dict( + is_add=True, + src_address=IPAddress.create_ip_address_object( + ip_address(source_ip) + ), + dst_address=IPAddress.create_ip_address_object( + ip_address(destination_ip) + ), + mcast_sw_if_index=Constants.BITWISE_NON_ZERO, + encap_vrf_id=0, + decap_next_index=2, + teid=teid + ) + err_msg = f"Failed to create GTPU tunnel interface " \ + f"on host {node[u'host']}" + with PapiSocketExecutor(node) as papi_exec: + sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg) + + if_key = Topology.add_new_port(node, u"gtpu_tunnel") + Topology.update_interface_sw_if_index(node, if_key, sw_if_index) + ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index) + Topology.update_interface_name(node, if_key, ifc_name) + + return sw_if_index + @staticmethod def vpp_create_loopback(node, mac=None): """Create loopback interface on VPP node. @@ -1010,9 +1116,11 @@ class InterfaceUtil: :raises RuntimeError: If it is not possible to create loopback on the node. """ - cmd = u"create_loopback" + cmd = u"create_loopback_instance" args = dict( - mac_address=L2Util.mac_to_bin(mac) if mac else 0 + mac_address=L2Util.mac_to_bin(mac) if mac else 0, + is_specified=False, + user_instance=0, ) err_msg = f"Failed to create loopback interface on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: @@ -1029,24 +1137,28 @@ class InterfaceUtil: return sw_if_index @staticmethod - def vpp_create_bond_interface(node, mode, load_balance=None, mac=None): + def vpp_create_bond_interface( + node, mode, load_balance=None, mac=None, gso=False): """Create bond interface on VPP node. :param node: DUT node from topology. :param mode: Link bonding mode. :param load_balance: Load balance (optional, valid for xor and lacp - modes, otherwise ignored). + modes, otherwise ignored). Default: None. :param mac: MAC address to assign to the bond interface (optional). + Default: None. + :param gso: Enable GSO support (optional). Default: False. :type node: dict :type mode: str :type load_balance: str :type mac: str + :type gso: bool :returns: Interface key (name) in topology. :rtype: str :raises RuntimeError: If it is not possible to create bond interface on the node. """ - cmd = u"bond_create" + cmd = u"bond_create2" args = dict( id=int(Constants.BITWISE_NON_ZERO), use_custom_mac=bool(mac is not None), @@ -1059,7 +1171,8 @@ class InterfaceUtil: LinkBondLoadBalanceAlgo, f"BOND_API_LB_ALGO_{load_balance.upper()}" ).value, - numa_only=False + numa_only=False, + enable_gso=gso ) err_msg = f"Failed to create bond interface on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: @@ -1111,16 +1224,21 @@ class InterfaceUtil: ) @staticmethod - def vpp_create_avf_interface(node, if_key, num_rx_queues=None): + def vpp_create_avf_interface( + node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0): """Create AVF interface on VPP node. :param node: DUT node from topology. :param if_key: Interface key from topology file of interface to be bound to i40evf driver. :param num_rx_queues: Number of RX queues. + :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP). + :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP). :type node: dict :type if_key: str :type num_rx_queues: int + :type rxq_size: int + :type txq_size: int :returns: AVF interface key (name) in topology. 
:rtype: str :raises RuntimeError: If it is not possible to create AVF interface on @@ -1136,15 +1254,78 @@ class InterfaceUtil: pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr), enable_elog=0, rxq_num=int(num_rx_queues) if num_rx_queues else 0, - rxq_size=0, - txq_size=0 + rxq_size=rxq_size, + txq_size=txq_size ) err_msg = f"Failed to create AVF interface on host {node[u'host']}" + + # FIXME: Remove once the fw/driver is upgraded. + for _ in range(10): + with PapiSocketExecutor(node) as papi_exec: + try: + sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index( + err_msg + ) + break + except AssertionError: + logger.error(err_msg) + else: + raise AssertionError(err_msg) + + InterfaceUtil.add_eth_interface( + node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf", + host_if_key=if_key + ) + + return Topology.get_interface_by_sw_index(node, sw_if_index) + + @staticmethod + def vpp_create_af_xdp_interface( + node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0, + mode=u"auto"): + """Create AF_XDP interface on VPP node. + + :param node: DUT node from topology. + :param if_key: Physical interface key from topology file of interface + to be bound to compatible driver. + :param num_rx_queues: Number of RX queues. (Optional, Default: none) + :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP). + :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP). + :param mode: AF_XDP interface mode. (Optional, Default: auto). + :type node: dict + :type if_key: str + :type num_rx_queues: int + :type rxq_size: int + :type txq_size: int + :type mode: str + :returns: Interface key (name) in topology file. + :rtype: str + :raises RuntimeError: If it is not possible to create AF_XDP interface + on the node. + """ + PapiSocketExecutor.run_cli_cmd( + node, u"set logging class af_xdp level debug" + ) + + cmd = u"af_xdp_create" + pci_addr = Topology.get_interface_pci_addr(node, if_key) + args = dict( + name=InterfaceUtil.pci_to_eth(node, pci_addr), + host_if=InterfaceUtil.pci_to_eth(node, pci_addr), + rxq_num=int(num_rx_queues) if num_rx_queues else 0, + rxq_size=rxq_size, + txq_size=txq_size, + mode=getattr(AfXdpMode, f"AF_XDP_API_MODE_{mode.upper()}").value + ) + err_msg = f"Failed to create AF_XDP interface on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg) + InterfaceUtil.vpp_set_interface_mac( + node, sw_if_index, Topology.get_interface_mac(node, if_key) + ) InterfaceUtil.add_eth_interface( - node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf", + node, sw_if_index=sw_if_index, ifc_pfx=u"eth_af_xdp", host_if_key=if_key ) @@ -1152,37 +1333,52 @@ class InterfaceUtil: @staticmethod def vpp_create_rdma_interface( - node, if_key, num_rx_queues=None, mode=u"auto"): + node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0, + mode=u"auto"): """Create RDMA interface on VPP node. :param node: DUT node from topology. :param if_key: Physical interface key from topology file of interface to be bound to rdma-core driver. :param num_rx_queues: Number of RX queues. + :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP). + :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP). :param mode: RDMA interface mode - auto/ibv/dv. :type node: dict :type if_key: str :type num_rx_queues: int + :type rxq_size: int + :type txq_size: int :type mode: str :returns: Interface key (name) in topology file. :rtype: str :raises RuntimeError: If it is not possible to create RDMA interface on the node. 
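+
+        Example (editorial sketch; ``port1`` is a hypothetical interface
+        key, and 1024 mirrors the ring size this patch stops hardcoding)::
+
+            if_key = InterfaceUtil.vpp_create_rdma_interface(
+                node, u"port1", num_rx_queues=2,
+                rxq_size=1024, txq_size=1024, mode=u"dv"
+            )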
""" - cmd = u"rdma_create" + PapiSocketExecutor.run_cli_cmd( + node, u"set logging class rdma level debug" + ) + + cmd = u"rdma_create_v2" pci_addr = Topology.get_interface_pci_addr(node, if_key) args = dict( name=InterfaceUtil.pci_to_eth(node, pci_addr), host_if=InterfaceUtil.pci_to_eth(node, pci_addr), rxq_num=int(num_rx_queues) if num_rx_queues else 0, - rxq_size=1024, - txq_size=1024, - mode=getattr(RdmaMode,f"RDMA_API_MODE_{mode.upper()}").value, + rxq_size=rxq_size, + txq_size=txq_size, + mode=getattr(RdmaMode, f"RDMA_API_MODE_{mode.upper()}").value, + # Note: Set True for non-jumbo packets. + no_multi_seg=False, + max_pktlen=0, ) err_msg = f"Failed to create RDMA interface on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg) + InterfaceUtil.vpp_set_interface_mac( + node, sw_if_index, Topology.get_interface_mac(node, if_key) + ) InterfaceUtil.add_eth_interface( node, sw_if_index=sw_if_index, ifc_pfx=u"eth_rdma", host_if_key=if_key @@ -1191,8 +1387,8 @@ class InterfaceUtil: return Topology.get_interface_by_sw_index(node, sw_if_index) @staticmethod - def vpp_enslave_physical_interface(node, interface, bond_if): - """Enslave physical interface to bond interface on VPP node. + def vpp_add_bond_member(node, interface, bond_if): + """Add member interface to bond interface on VPP node. :param node: DUT node from topology. :param interface: Physical interface key from topology file. @@ -1200,18 +1396,18 @@ class InterfaceUtil: :type node: dict :type interface: str :type bond_if: str - :raises RuntimeError: If it is not possible to enslave physical - interface to bond interface on the node. + :raises RuntimeError: If it is not possible to add member to bond + interface on the node. 
""" - cmd = u"bond_enslave" + cmd = u"bond_add_member" args = dict( sw_if_index=Topology.get_interface_sw_index(node, interface), bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if), is_passive=False, is_long_timeout=False ) - err_msg = f"Failed to enslave physical interface {interface} to bond " \ - f"interface {bond_if} on host {node[u'host']}" + err_msg = f"Failed to add member {interface} to bond interface " \ + f"{bond_if} on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -1224,7 +1420,7 @@ class InterfaceUtil: :type node: dict :type verbose: bool """ - cmd = u"sw_interface_bond_dump" + cmd = u"sw_bond_interface_dump" err_msg = f"Failed to get bond interface dump on host {node[u'host']}" data = f"Bond data on node {node[u'host']}:\n" @@ -1239,26 +1435,26 @@ class InterfaceUtil: data += u" load balance: {lb}\n".format( lb=bond[u"lb"].name.replace(u"BOND_API_LB_ALGO_", u"").lower() ) - data += f" number of active slaves: {bond[u'active_slaves']}\n" + data += f" number of active members: {bond[u'active_members']}\n" if verbose: - slave_data = InterfaceUtil.vpp_bond_slave_dump( + member_data = InterfaceUtil.vpp_bond_member_dump( node, Topology.get_interface_by_sw_index( node, bond[u"sw_if_index"] ) ) - for slave in slave_data: - if not slave[u"is_passive"]: - data += f" {slave[u'interface_name']}\n" - data += f" number of slaves: {bond[u'slaves']}\n" + for member in member_data: + if not member[u"is_passive"]: + data += f" {member[u'interface_name']}\n" + data += f" number of members: {bond[u'members']}\n" if verbose: - for slave in slave_data: - data += f" {slave[u'interface_name']}\n" + for member in member_data: + data += f" {member[u'interface_name']}\n" data += f" interface id: {bond[u'id']}\n" data += f" sw_if_index: {bond[u'sw_if_index']}\n" logger.info(data) @staticmethod - def vpp_bond_slave_dump(node, interface): + def vpp_bond_member_dump(node, interface): """Get bond interface slave(s) data on VPP node. :param node: DUT node from topology. @@ -1268,7 +1464,7 @@ class InterfaceUtil: :returns: Bond slave interface data. :rtype: dict """ - cmd = u"sw_interface_slave_dump" + cmd = u"sw_member_interface_dump" args = dict( sw_if_index=Topology.get_interface_sw_index(node, interface) ) @@ -1277,7 +1473,7 @@ class InterfaceUtil: with PapiSocketExecutor(node) as papi_exec: details = papi_exec.add(cmd, **args).get_details(err_msg) - logger.debug(f"Slave data:\n{details}") + logger.debug(f"Member data:\n{details}") return details @staticmethod @@ -1353,6 +1549,8 @@ class InterfaceUtil: def get_sw_if_index(node, interface_name): """Get sw_if_index for the given interface from actual interface dump. + FIXME: Delete and redirect callers to vpp_get_interface_sw_index. + :param node: VPP node to get interface data from. :param interface_name: Name of the specific interface. :type node: dict @@ -1467,6 +1665,29 @@ class InterfaceUtil: cmd = f"{ns_str} ip link set {interface} {mac_str}" exec_cmd_no_error(node, cmd, sudo=True) + @staticmethod + def set_linux_interface_promisc( + node, interface, namespace=None, vf_id=None, state=u"on"): + """Set promisc state for interface in linux. + + :param node: Node where to execute command. + :param interface: Interface in namespace. + :param namespace: Exec command in namespace. (Optional, Default: None) + :param vf_id: Virtual Function id. (Optional, Default: None) + :param state: State of feature. 
(Optional, Default: on) + :type node: dict + :type interface: str + :type namespace: str + :type vf_id: int + :type state: str + """ + promisc_str = f"vf {vf_id} promisc {state}" if vf_id is not None \ + else f"promisc {state}" + ns_str = f"ip netns exec {namespace}" if namespace else u"" + + cmd = f"{ns_str} ip link set dev {interface} {promisc_str}" + exec_cmd_no_error(node, cmd, sudo=True) + @staticmethod def set_linux_interface_trust_on( node, interface, namespace=None, vf_id=None): @@ -1509,9 +1730,76 @@ class InterfaceUtil: exec_cmd_no_error(node, cmd, sudo=True) @staticmethod - def init_avf_interface(node, ifc_key, numvfs=1, osi_layer=u"L2"): - """Init PCI device by creating VIFs and bind them to vfio-pci for AVF - driver testing on DUT. + def set_linux_interface_state( + node, interface, namespace=None, state=u"up"): + """Set operational state for interface in linux. + + :param node: Node where to execute command. + :param interface: Interface in namespace. + :param namespace: Execute command in namespace. Optional + :param state: Up/Down. + :type node: dict + :type interface: str + :type namespace: str + :type state: str + """ + ns_str = f"ip netns exec {namespace}" if namespace else u"" + + cmd = f"{ns_str} ip link set dev {interface} {state}" + exec_cmd_no_error(node, cmd, sudo=True) + + @staticmethod + def init_interface(node, ifc_key, driver, numvfs=0, osi_layer=u"L2"): + """Init PCI device. Check driver compatibility and bind to proper + drivers. Optionally create NIC VFs. + + :param node: DUT node. + :param ifc_key: Interface key from topology file. + :param driver: Base driver to use. + :param numvfs: Number of VIFs to initialize, 0 - disable the VIFs. + :param osi_layer: OSI Layer type to initialize TG with. + Default value "L2" sets linux interface spoof off. + :type node: dict + :type ifc_key: str + :type driver: str + :type numvfs: int + :type osi_layer: str + :returns: Virtual Function topology interface keys. + :rtype: list + :raises RuntimeError: If a reason preventing initialization is found. + """ + kernel_driver = Topology.get_interface_driver(node, ifc_key) + vf_keys = [] + if driver == u"avf": + if kernel_driver not in ( + u"ice", u"iavf", u"i40e", u"i40evf"): + raise RuntimeError( + f"AVF needs ice or i40e compatible driver, not " + f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}" + ) + vf_keys = InterfaceUtil.init_generic_interface( + node, ifc_key, numvfs=numvfs, osi_layer=osi_layer + ) + elif driver == u"af_xdp": + if kernel_driver not in ( + u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core", + u"ixgbe"): + raise RuntimeError( + f"AF_XDP needs ice/i40e/rdma/ixgbe compatible driver, not " + f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}" + ) + vf_keys = InterfaceUtil.init_generic_interface( + node, ifc_key, numvfs=numvfs, osi_layer=osi_layer + ) + elif driver == u"rdma-core": + vf_keys = InterfaceUtil.init_generic_interface( + node, ifc_key, numvfs=numvfs, osi_layer=osi_layer + ) + return vf_keys + + @staticmethod + def init_generic_interface(node, ifc_key, numvfs=0, osi_layer=u"L2"): + """Init PCI device. Bind to proper drivers. Optionally create NIC VFs. :param node: DUT node. :param ifc_key: Interface key from topology file. 
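A usage sketch for the init_interface dispatch above (editorial, not part of the patch; ``nodes`` is a parsed topology dict, u"DUT1" and u"port1" are placeholder keys, and the avf branch additionally requires an ice/i40e family kernel driver, as checked in the code):

    vf_keys = InterfaceUtil.init_interface(
        nodes[u"DUT1"], u"port1", driver=u"avf", numvfs=1, osi_layer=u"L2"
    )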
@@ -1531,27 +1819,28 @@ class InterfaceUtil: pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":") uio_driver = Topology.get_uio_driver(node) kernel_driver = Topology.get_interface_driver(node, ifc_key) - if kernel_driver not in (u"i40e", u"i40evf"): - raise RuntimeError( - f"AVF needs i40e-compatible driver, not {kernel_driver} " - f"at node {node[u'host']} ifc {ifc_key}" - ) current_driver = DUTSetup.get_pci_dev_driver( node, pf_pci_addr.replace(u":", r"\:")) + pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`" VPPUtil.stop_vpp_service(node) if current_driver != kernel_driver: # PCI device must be re-bound to kernel driver before creating VFs. DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True) # Stop VPP to prevent deadlock. - # Unbind from current driver. - DUTSetup.pci_driver_unbind(node, pf_pci_addr) + # Unbind from current driver if bound. + if current_driver: + DUTSetup.pci_driver_unbind(node, pf_pci_addr) # Bind to kernel driver. DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver) # Initialize PCI VFs. DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs) + if not numvfs: + if osi_layer == u"L2": + InterfaceUtil.set_linux_interface_promisc(node, pf_dev) + vf_ifc_keys = [] # Set MAC address and bind each virtual function to uio driver. for vf_id in range(numvfs): @@ -1561,7 +1850,6 @@ class InterfaceUtil: ] ) - pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`" InterfaceUtil.set_linux_interface_trust_on( node, pf_dev, vf_id=vf_id ) @@ -1572,6 +1860,9 @@ class InterfaceUtil: InterfaceUtil.set_linux_interface_mac( node, pf_dev, vf_mac_addr, vf_id=vf_id ) + InterfaceUtil.set_linux_interface_state( + node, pf_dev, state=u"up" + ) DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id) DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver) @@ -1612,6 +1903,19 @@ class InterfaceUtil: details = papi_exec.get_details(err_msg) return sorted(details, key=lambda k: k[u"sw_if_index"]) + @staticmethod + def vpp_sw_interface_rx_placement_dump_on_all_duts(nodes): + """Dump VPP interface RX placement on all given nodes. + + :param nodes: Nodes to run command on. + :type nodes: dict + :returns: Thread mapping information as a list of dictionaries. + :rtype: list + """ + for node in nodes.values(): + if node[u"type"] == NodeType.DUT: + InterfaceUtil.vpp_sw_interface_rx_placement_dump(node) + @staticmethod def vpp_sw_interface_set_rx_placement( node, sw_if_index, queue_id, worker_id): @@ -1641,39 +1945,70 @@ class InterfaceUtil: papi_exec.add(cmd, **args).get_reply(err_msg) @staticmethod - def vpp_round_robin_rx_placement(node, prefix): + def vpp_round_robin_rx_placement( + node, prefix, workers=None): """Set Round Robin interface RX placement on all worker threads on node. + If specified, workers limits the number of physical cores used + for data plane I/O work. Other cores are presumed to do something else, + e.g. asynchronous crypto processing. + None means all workers are used for data plane work. + :param node: Topology nodes. :param prefix: Interface name prefix. + :param workers: Comma separated worker index numbers intended for + dataplane work. 
:type node: dict :type prefix: str + :type workers: str """ - worker_id = 0 - worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1 + thread_data = VPPUtil.vpp_show_threads(node) + worker_cnt = len(thread_data) - 1 if not worker_cnt: - return + return None + worker_ids = list() + if workers: + for item in thread_data: + if str(item.cpu_id) in workers.split(u","): + worker_ids.append(item.id) + else: + for item in thread_data: + if u"vpp_main" not in item.name: + worker_ids.append(item.id) + + worker_idx = 0 for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node): for interface in node[u"interfaces"].values(): if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \ and prefix in interface[u"name"]: InterfaceUtil.vpp_sw_interface_set_rx_placement( node, placement[u"sw_if_index"], placement[u"queue_id"], - worker_id % worker_cnt + worker_ids[worker_idx % len(worker_ids)] - 1 ) - worker_id += 1 + worker_idx += 1 @staticmethod - def vpp_round_robin_rx_placement_on_all_duts(nodes, prefix): - """Set Round Robin interface RX placement on all worker threads + def vpp_round_robin_rx_placement_on_all_duts( + nodes, prefix, workers=None): + """Set Round Robin interface RX placement on worker threads on all DUTs. + If specified, workers limits the number of physical cores used + for data plane I/O work. Other cores are presumed to do something else, + e.g. asynchronous crypto processing. + None means all cores are used for data plane work. + :param nodes: Topology nodes. :param prefix: Interface name prefix. + :param workers: Comma separated worker index numbers intended for + dataplane work. :type nodes: dict :type prefix: str + :type workers: str """ for node in nodes.values(): if node[u"type"] == NodeType.DUT: - InterfaceUtil.vpp_round_robin_rx_placement(node, prefix) + InterfaceUtil.vpp_round_robin_rx_placement( + node, prefix, workers + )
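A minimal end-to-end sketch showing how the utilities touched by this patch compose (editorial; the node key, interface key, PCI address, name prefix, and CPU ids are placeholders, not values from this change):

    node = nodes[u"DUT1"]
    # Bring the physical function up and size its channels in Linux.
    InterfaceUtil.set_interface_state_pci(node, [u"0000:17:00.0"], state=u"up")
    InterfaceUtil.set_interface_channels(node, [u"0000:17:00.0"], num_queues=2)
    # Create the VPP side interface, here AF_XDP in zero-copy mode.
    if_key = InterfaceUtil.vpp_create_af_xdp_interface(
        node, u"port1", num_rx_queues=2, mode=u"zero_copy"
    )
    # Spread RX queues round robin over the workers pinned to CPUs 2 and 3
    # (workers takes comma separated cpu_id values, matched against
    # "show threads" output).
    InterfaceUtil.vpp_round_robin_rx_placement_on_all_duts(
        nodes, prefix=u"af_xdp", workers=u"2,3"
    )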