diff --git a/resources/libraries/python/InterfaceUtil.py b/resources/libraries/python/InterfaceUtil.py
index 481c122e3f..4cd7cf10b6 100644
--- a/resources/libraries/python/InterfaceUtil.py
+++ b/resources/libraries/python/InterfaceUtil.py
@@ -20,7 +20,6 @@ from ipaddress import ip_address
 from robot.api import logger
 from resources.libraries.python.Constants import Constants
-from resources.libraries.python.CpuUtils import CpuUtils
 from resources.libraries.python.DUTSetup import DUTSetup
 from resources.libraries.python.IPAddress import IPAddress
 from resources.libraries.python.L2Util import L2Util
@@ -1061,6 +1060,49 @@ class InterfaceUtil:
 
         return ifc_name, sw_if_index
 
+    @staticmethod
+    def create_gtpu_tunnel_interface(node, teid, source_ip, destination_ip):
+        """Create GTPU tunnel interface and return its sw_if_index.
+
+        :param node: Node where to create GTPU interface.
+        :param teid: GTPU Tunnel Endpoint Identifier.
+        :param source_ip: Source IP of a GTPU Tunnel End Point.
+        :param destination_ip: Destination IP of a GTPU Tunnel End Point.
+        :type node: dict
+        :type teid: int
+        :type source_ip: str
+        :type destination_ip: str
+        :returns: sw_if_index of the created interface.
+        :rtype: int
+        :raises RuntimeError: If unable to create GTPU tunnel interface on
+            the node.
+        """
+        cmd = u"gtpu_add_del_tunnel"
+        args = dict(
+            is_add=True,
+            src_address=IPAddress.create_ip_address_object(
+                ip_address(source_ip)
+            ),
+            dst_address=IPAddress.create_ip_address_object(
+                ip_address(destination_ip)
+            ),
+            mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
+            encap_vrf_id=0,
+            decap_next_index=2,
+            teid=teid
+        )
+        err_msg = f"Failed to create GTPU tunnel interface " \
+            f"on host {node[u'host']}"
+        with PapiSocketExecutor(node) as papi_exec:
+            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+        if_key = Topology.add_new_port(node, u"gtpu_tunnel")
+        Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
+        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
+        Topology.update_interface_name(node, if_key, ifc_name)
+
+        return sw_if_index
+
     @staticmethod
     def vpp_create_loopback(node, mac=None):
         """Create loopback interface on VPP node.
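A minimal usage sketch of the keyword added above; dut_node, the TEID and
the IP values are illustrative assumptions, only the signature comes from
the patch itself:

    # Hypothetical call on a CSIT topology node dict named dut_node.
    sw_if_index = InterfaceUtil.create_gtpu_tunnel_interface(
        dut_node, teid=101,
        source_ip=u"192.168.1.1", destination_ip=u"192.168.1.2"
    )
    # Besides returning sw_if_index, the keyword registers the interface
    # in the topology under a new u"gtpu_tunnel" port key.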
@@ -1216,8 +1258,19 @@ class InterfaceUtil:
             txq_size=txq_size
         )
         err_msg = f"Failed to create AVF interface on host {node[u'host']}"
-        with PapiSocketExecutor(node) as papi_exec:
-            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+        # FIXME: Remove once the fw/driver is upgraded.
+        for _ in range(10):
+            with PapiSocketExecutor(node) as papi_exec:
+                try:
+                    sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(
+                        err_msg
+                    )
+                    break
+                except AssertionError:
+                    logger.error(err_msg)
+        else:
+            raise AssertionError(err_msg)
 
         InterfaceUtil.add_eth_interface(
             node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
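The retry above relies on Python's for/else idiom: the else branch runs only
when the loop finishes without hitting break. A standalone sketch of the same
shape, where do_create is a placeholder for the PAPI call, not a CSIT API:

    for _ in range(10):
        try:
            sw_if_index = do_create()  # placeholder for papi_exec.add(...)
            break  # success, so the for-else below is skipped
        except AssertionError:
            logger.error(u"Create failed, retrying.")  # robot.api logger
    else:
        # Reached only when all ten attempts raised AssertionError.
        raise AssertionError(u"Create failed after 10 attempts.")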
@@ -1729,14 +1782,19 @@ class InterfaceUtil:
             )
         elif driver == u"af_xdp":
             if kernel_driver not in (
-                    u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core"):
+                    u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core",
+                    u"ixgbe"):
                 raise RuntimeError(
-                    f"AF_XDP needs ice or i40e or rdma compatible driver, not "
+                    f"AF_XDP needs an ice/i40e/rdma/ixgbe compatible driver, "
                     f"not {kernel_driver} at node {node[u'host']} "
                     f"ifc {ifc_key}"
                 )
             vf_keys = InterfaceUtil.init_generic_interface(
                 node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
             )
+        elif driver == u"rdma-core":
+            vf_keys = InterfaceUtil.init_generic_interface(
+                node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
+            )
         return vf_keys
@@ -1770,8 +1828,9 @@ class InterfaceUtil:
         # PCI device must be re-bound to kernel driver before creating VFs.
         DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
         # Stop VPP to prevent deadlock.
-        # Unbind from current driver.
-        DUTSetup.pci_driver_unbind(node, pf_pci_addr)
+        # Unbind from current driver if bound.
+        if current_driver:
+            DUTSetup.pci_driver_unbind(node, pf_pci_addr)
         # Bind to kernel driver.
         DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
@@ -1887,64 +1946,69 @@ class InterfaceUtil:
 
     @staticmethod
     def vpp_round_robin_rx_placement(
-            node, prefix, dp_worker_limit=None):
+            node, prefix, workers=None):
         """Set Round Robin interface RX placement on all worker threads
         on node.
 
-        If specified, dp_core_limit limits the number of physical cores used
+        If specified, workers lists the CPU IDs of worker threads to be used
         for data plane I/O work. Other cores are presumed to do something
         else, e.g. asynchronous crypto processing.
         None means all workers are used for data plane work.
-        Note this keyword specifies workers, not cores.
 
         :param node: Topology node.
         :param prefix: Interface name prefix.
-        :param dp_worker_limit: How many cores for data plane work.
+        :param workers: Comma separated list of worker CPU IDs intended for
+            data plane work.
         :type node: dict
         :type prefix: str
-        :type dp_worker_limit: Optional[int]
+        :type workers: Optional[str]
         """
-        worker_id = 0
-        worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
-        if dp_worker_limit is not None:
-            worker_cnt = min(worker_cnt, dp_worker_limit)
+        thread_data = VPPUtil.vpp_show_threads(node)
+        worker_cnt = len(thread_data) - 1
         if not worker_cnt:
-            return
+            return None
+        worker_ids = list()
+        if workers:
+            for item in thread_data:
+                if str(item.cpu_id) in workers.split(u","):
+                    worker_ids.append(item.id)
+        else:
+            for item in thread_data:
+                if u"vpp_main" not in item.name:
+                    worker_ids.append(item.id)
+
+        worker_idx = 0
         for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
             for interface in node[u"interfaces"].values():
                 if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
                         and prefix in interface[u"name"]:
                     InterfaceUtil.vpp_sw_interface_set_rx_placement(
                         node, placement[u"sw_if_index"],
                         placement[u"queue_id"],
-                        worker_id % worker_cnt
+                        worker_ids[worker_idx % len(worker_ids)] - 1
                     )
-                    worker_id += 1
+                    worker_idx += 1
 
     @staticmethod
     def vpp_round_robin_rx_placement_on_all_duts(
-            nodes, prefix, dp_core_limit=None):
-        """Set Round Robin interface RX placement on all worker threads
+            nodes, prefix, workers=None):
+        """Set Round Robin interface RX placement on worker threads
         on all DUTs.
 
-        If specified, dp_core_limit limits the number of physical cores used
+        If specified, workers lists the CPU IDs of worker threads to be used
         for data plane I/O work. Other cores are presumed to do something
         else, e.g. asynchronous crypto processing.
         None means all cores are used for data plane work.
-        Note this keyword specifies cores, not workers.
 
         :param nodes: Topology nodes.
        :param prefix: Interface name prefix.
-        :param dp_worker_limit: How many cores for data plane work.
+        :param workers: Comma separated list of worker CPU IDs intended for
+            data plane work.
         :type nodes: dict
         :type prefix: str
-        :type dp_worker_limit: Optional[int]
+        :type workers: Optional[str]
         """
         for node in nodes.values():
             if node[u"type"] == NodeType.DUT:
-                dp_worker_limit = CpuUtils.worker_count_from_cores_and_smt(
-                    phy_cores=dp_core_limit,
-                    smt_used=CpuUtils.is_smt_enabled(node[u"cpuinfo"]),
-                )
                 InterfaceUtil.vpp_round_robin_rx_placement(
-                    node, prefix, dp_worker_limit
+                    node, prefix, workers
                 )
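A possible invocation of the reworked placement keyword; the eth_avf prefix
appears earlier in this patch, while the CPU IDs are illustrative and must
match cpu_id values of worker threads on the DUTs:

    # Spread RX queues of eth_avf interfaces round robin over the workers
    # running on CPUs 2 and 4; workers=None keeps using all workers.
    InterfaceUtil.vpp_round_robin_rx_placement_on_all_duts(
        nodes, prefix=u"eth_avf", workers=u"2,4"
    )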