X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Flibraries%2Fpython%2FInterfaceUtil.py;h=4a53f71a015e68001580f00bd501eb9b5faf0734;hp=481c122e3f2cefdae76615ef773da8f88d108f47;hb=d07f6cae7f18c1513650d4cb690115d60201e704;hpb=7335a3fabef47d2d3ee0f0bffc2c28cb3307156c

diff --git a/resources/libraries/python/InterfaceUtil.py b/resources/libraries/python/InterfaceUtil.py
index 481c122e3f..4a53f71a01 100644
--- a/resources/libraries/python/InterfaceUtil.py
+++ b/resources/libraries/python/InterfaceUtil.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2022 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -20,7 +20,6 @@ from ipaddress import ip_address
 from robot.api import logger
 
 from resources.libraries.python.Constants import Constants
-from resources.libraries.python.CpuUtils import CpuUtils
 from resources.libraries.python.DUTSetup import DUTSetup
 from resources.libraries.python.IPAddress import IPAddress
 from resources.libraries.python.L2Util import L2Util
@@ -213,6 +212,10 @@ class InterfaceUtil:
             raise ValueError(f"Unknown if_type: {if_type}")
 
         if node[u"type"] == NodeType.DUT:
+            if sw_if_index is None:
+                raise ValueError(
+                    f"Interface index for {interface} not assigned by VPP."
+                )
             if state == u"up":
                 flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
             elif state == u"down":
@@ -356,31 +359,6 @@ class InterfaceUtil:
         except AssertionError as err:
             logger.debug(f"Setting MTU failed.\n{err}")
 
-    @staticmethod
-    def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
-        """Set Ethernet MTU on all interfaces.
-
-        :param node: VPP node.
-        :param mtu: Ethernet MTU size in Bytes. Default: 9200.
-        :type node: dict
-        :type mtu: int
-        """
-        for interface in node[u"interfaces"]:
-            InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
-
-    @staticmethod
-    def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
-        """Set Ethernet MTU on all interfaces on all DUTs.
-
-        :param nodes: VPP nodes.
-        :param mtu: Ethernet MTU size in Bytes. Default: 9200.
-        :type nodes: dict
-        :type mtu: int
-        """
-        for node in nodes.values():
-            if node[u"type"] == NodeType.DUT:
-                InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
-
     @staticmethod
     def vpp_node_interfaces_ready_wait(node, retries=15):
         """Wait until all interfaces with admin-up are in link-up state.
@@ -850,7 +828,7 @@ class InterfaceUtil:
         :raises RuntimeError: if it is unable to create VxLAN interface on the
             node.
         """
-        cmd = u"vxlan_add_del_tunnel"
+        cmd = u"vxlan_add_del_tunnel_v3"
         args = dict(
             is_add=True,
             instance=Constants.BITWISE_NON_ZERO,
@@ -1061,6 +1039,74 @@ class InterfaceUtil:
 
         return ifc_name, sw_if_index
 
+    @staticmethod
+    def create_gtpu_tunnel_interface(node, teid, source_ip, destination_ip):
+        """Create GTPU interface and return sw if index of created interface.
+
+        :param node: Node where to create GTPU interface.
+        :param teid: GTPU Tunnel Endpoint Identifier.
+        :param source_ip: Source IP of a GTPU Tunnel End Point.
+        :param destination_ip: Destination IP of a GTPU Tunnel End Point.
+        :type node: dict
+        :type teid: int
+        :type source_ip: str
+        :type destination_ip: str
+        :returns: SW IF INDEX of created interface.
+        :rtype: int
+        :raises RuntimeError: if it is unable to create GTPU interface on the
+            node.
+        """
+        cmd = u"gtpu_add_del_tunnel"
+        args = dict(
+            is_add=True,
+            src_address=IPAddress.create_ip_address_object(
+                ip_address(source_ip)
+            ),
+            dst_address=IPAddress.create_ip_address_object(
+                ip_address(destination_ip)
+            ),
+            mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
+            encap_vrf_id=0,
+            decap_next_index=2,
+            teid=teid
+        )
+        err_msg = f"Failed to create GTPU tunnel interface " \
+            f"on host {node[u'host']}"
+        with PapiSocketExecutor(node) as papi_exec:
+            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+        if_key = Topology.add_new_port(node, u"gtpu_tunnel")
+        Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
+        ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
+        Topology.update_interface_name(node, if_key, ifc_name)
+
+        return sw_if_index
+
+    @staticmethod
+    def vpp_enable_gtpu_offload_rx(node, interface, gtpu_if_index):
+        """Enable GTPU offload RX onto interface.
+
+        :param node: Node to run command on.
+        :param interface: Name of the specific interface.
+        :param gtpu_if_index: Index of GTPU tunnel interface.
+
+        :type node: dict
+        :type interface: str
+        :type gtpu_if_index: int
+        """
+        sw_if_index = Topology.get_interface_sw_index(node, interface)
+
+        cmd = u"gtpu_offload_rx"
+        args = dict(
+            hw_if_index=sw_if_index,
+            sw_if_index=gtpu_if_index,
+            enable=True
+        )
+
+        err_msg = f"Failed to enable GTPU offload RX on host {node[u'host']}"
+        with PapiSocketExecutor(node) as papi_exec:
+            papi_exec.add(cmd, **args).get_reply(err_msg)
+
     @staticmethod
     def vpp_create_loopback(node, mac=None):
         """Create loopback interface on VPP node.
@@ -1216,8 +1262,19 @@ class InterfaceUtil:
             txq_size=txq_size
         )
         err_msg = f"Failed to create AVF interface on host {node[u'host']}"
-        with PapiSocketExecutor(node) as papi_exec:
-            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+        # FIXME: Remove once the fw/driver is upgraded.
+        for _ in range(10):
+            with PapiSocketExecutor(node) as papi_exec:
+                try:
+                    sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(
+                        err_msg
+                    )
+                    break
+                except AssertionError:
+                    logger.error(err_msg)
+        else:
+            raise AssertionError(err_msg)
 
         InterfaceUtil.add_eth_interface(
             node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
@@ -1306,7 +1363,7 @@ class InterfaceUtil:
                 node, u"set logging class rdma level debug"
             )
 
-        cmd = u"rdma_create_v2"
+        cmd = u"rdma_create_v3"
         pci_addr = Topology.get_interface_pci_addr(node, if_key)
         args = dict(
             name=InterfaceUtil.pci_to_eth(node, pci_addr),
@@ -1318,6 +1375,7 @@ class InterfaceUtil:
             # Note: Set True for non-jumbo packets.
             no_multi_seg=False,
            max_pktlen=0,
+            # TODO: Apply desired RSS flags.
         )
         err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
         with PapiSocketExecutor(node) as papi_exec:
@@ -1729,14 +1787,19 @@ class InterfaceUtil:
             )
         elif driver == u"af_xdp":
             if kernel_driver not in (
-                    u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core"):
+                    u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core",
+                    u"ixgbe"):
                 raise RuntimeError(
-                    f"AF_XDP needs ice or i40e or rdma compatible driver, not "
+                    f"AF_XDP needs ice/i40e/rdma/ixgbe compatible driver, not "
                     f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
                 )
             vf_keys = InterfaceUtil.init_generic_interface(
                 node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
             )
+        elif driver == u"rdma-core":
+            vf_keys = InterfaceUtil.init_generic_interface(
+                node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
+            )
         return vf_keys
 
     @staticmethod
@@ -1770,8 +1833,9 @@ class InterfaceUtil:
         # PCI device must be re-bound to kernel driver before creating VFs.
         DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
         # Stop VPP to prevent deadlock.
-        # Unbind from current driver.
-        DUTSetup.pci_driver_unbind(node, pf_pci_addr)
+        # Unbind from current driver if bound.
+        if current_driver:
+            DUTSetup.pci_driver_unbind(node, pf_pci_addr)
         # Bind to kernel driver.
         DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
 
@@ -1887,64 +1951,69 @@ class InterfaceUtil:
 
     @staticmethod
     def vpp_round_robin_rx_placement(
-            node, prefix, dp_worker_limit=None):
+            node, prefix, workers=None):
         """Set Round Robin interface RX placement on all worker threads
         on node.
 
-        If specified, dp_core_limit limits the number of physical cores used
+        If specified, workers limits the number of physical cores used
         for data plane I/O work. Other cores are presumed to do something
         else, e.g. asynchronous crypto processing.
         None means all workers are used for data plane work.
-        Note this keyword specifies workers, not cores.
 
         :param node: Topology nodes.
         :param prefix: Interface name prefix.
-        :param dp_worker_limit: How many cores for data plane work.
+        :param workers: Comma separated worker index numbers intended for
+            dataplane work.
         :type node: dict
         :type prefix: str
-        :type dp_worker_limit: Optional[int]
+        :type workers: str
         """
-        worker_id = 0
-        worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
-        if dp_worker_limit is not None:
-            worker_cnt = min(worker_cnt, dp_worker_limit)
+        thread_data = VPPUtil.vpp_show_threads(node)
+        worker_cnt = len(thread_data) - 1
         if not worker_cnt:
-            return
+            return None
+        worker_ids = list()
+        if workers:
+            for item in thread_data:
+                if str(item.cpu_id) in workers.split(u","):
+                    worker_ids.append(item.id)
+        else:
+            for item in thread_data:
+                if u"vpp_main" not in item.name:
+                    worker_ids.append(item.id)
+
+        worker_idx = 0
         for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
             for interface in node[u"interfaces"].values():
                 if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
                         and prefix in interface[u"name"]:
                     InterfaceUtil.vpp_sw_interface_set_rx_placement(
                         node, placement[u"sw_if_index"], placement[u"queue_id"],
-                        worker_id % worker_cnt
+                        worker_ids[worker_idx % len(worker_ids)] - 1
                     )
-                    worker_id += 1
+                    worker_idx += 1
 
     @staticmethod
     def vpp_round_robin_rx_placement_on_all_duts(
-            nodes, prefix, dp_core_limit=None):
+            nodes, prefix, workers=None):
         """Set Round Robin interface RX placement on worker threads
         on all DUTs.
-        If specified, dp_core_limit limits the number of physical cores used
+        If specified, workers limits the number of physical cores used
         for data plane I/O work. Other cores are presumed to do something
         else, e.g. asynchronous crypto processing.
         None means all cores are used for data plane work.
-        Note this keyword specifies cores, not workers.
 
         :param nodes: Topology nodes.
         :param prefix: Interface name prefix.
-        :param dp_worker_limit: How many cores for data plane work.
+        :param workers: Comma separated worker index numbers intended for
+            dataplane work.
         :type nodes: dict
         :type prefix: str
-        :type dp_worker_limit: Optional[int]
+        :type workers: str
         """
         for node in nodes.values():
             if node[u"type"] == NodeType.DUT:
-                dp_worker_limit = CpuUtils.worker_count_from_cores_and_smt(
-                    phy_cores=dp_core_limit,
-                    smt_used=CpuUtils.is_smt_enabled(node[u"cpuinfo"]),
-                )
                 InterfaceUtil.vpp_round_robin_rx_placement(
-                    node, prefix, dp_worker_limit
+                    node, prefix, workers
                 )
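
Usage sketch for the new GTPU keywords in the diff above (not part of the diff itself). It assumes a CSIT test context where "dut" is a topology node dict and u"port1" is a hypothetical interface key from the topology file; the TEID and tunnel endpoint addresses are illustrative values only.

    from resources.libraries.python.InterfaceUtil import InterfaceUtil

    def gtpu_tunnel_example(dut):
        """Create a GTPU tunnel on dut and enable GTPU RX offload for it."""
        # Create the tunnel; returns the sw_if_index of the new gtpu_tunnel
        # interface, which the keyword also registers in the topology.
        gtpu_index = InterfaceUtil.create_gtpu_tunnel_interface(
            dut, teid=10, source_ip=u"192.168.100.1",
            destination_ip=u"192.168.100.2"
        )
        # Offload GTPU RX processing onto the physical interface u"port1"
        # (hypothetical key; resolved to its sw_if_index by the keyword).
        InterfaceUtil.vpp_enable_gtpu_offload_rx(dut, u"port1", gtpu_index)
        return gtpu_index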
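
Usage sketch for the reworked RX placement keywords above (not part of the diff itself). Per the new code, "workers" is a comma separated string matched against the cpu_id values reported by VPP show threads; the CPU numbers and the u"avf" interface prefix below are illustrative assumptions.

    from resources.libraries.python.InterfaceUtil import InterfaceUtil

    def pin_dataplane_rx_queues(nodes):
        """Round robin RX queues of avf interfaces over workers on CPUs 2 and 3."""
        InterfaceUtil.vpp_round_robin_rx_placement_on_all_duts(
            nodes, prefix=u"avf", workers=u"2,3"
        )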