X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Flibraries%2Fpython%2FInterfaceUtil.py;h=94c78a1bef9379d80355451033b7261c393c4584;hp=fed2beed3b520d11a7d5b1281ec41f45d673e493;hb=HEAD;hpb=a275fa0062158d712152f542b7bc9ec40b5c5f31

diff --git a/resources/libraries/python/InterfaceUtil.py b/resources/libraries/python/InterfaceUtil.py
index fed2beed3b..ff013307bc 100644
--- a/resources/libraries/python/InterfaceUtil.py
+++ b/resources/libraries/python/InterfaceUtil.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -13,18 +13,19 @@
 
 """Interface util library."""
 
+from json import loads
 from time import sleep
 from enum import IntEnum
 from ipaddress import ip_address
 
 from robot.api import logger
+from robot.libraries.BuiltIn import BuiltIn
 
 from resources.libraries.python.Constants import Constants
 from resources.libraries.python.DUTSetup import DUTSetup
 from resources.libraries.python.IPAddress import IPAddress
 from resources.libraries.python.L2Util import L2Util
 from resources.libraries.python.PapiExecutor import PapiSocketExecutor
-from resources.libraries.python.parsers.JsonParser import JsonParser
 from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
 from resources.libraries.python.topology import NodeType, Topology
 from resources.libraries.python.VPPUtil import VPPUtil
@@ -212,6 +213,10 @@ class InterfaceUtil:
             raise ValueError(f"Unknown if_type: {if_type}")
 
         if node[u"type"] == NodeType.DUT:
+            if sw_if_index is None:
+                raise ValueError(
+                    f"Interface index for {interface} not assigned by VPP."
+                )
             if state == u"up":
                 flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
             elif state == u"down":
@@ -290,6 +295,21 @@ class InterfaceUtil:
             cmd = f"ethtool --set-channels {pf_eth} {channel} {num_queues}"
             exec_cmd_no_error(node, cmd, sudo=True)
 
+    @staticmethod
+    def set_interface_xdp_off(node, pf_pcis):
+        """Detach any currently attached XDP/BPF program from the specified
+        interfaces.
+
+        :param node: Topology node.
+        :param pf_pcis: List of node's interfaces PCI addresses.
+        :type node: dict
+        :type pf_pcis: list
+        """
+        for pf_pci in pf_pcis:
+            pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
+            cmd = f"ip link set dev {pf_eth} xdp off"
+            exec_cmd_no_error(node, cmd, sudo=True)
+
     @staticmethod
     def set_interface_flow_control(node, pf_pcis, rxf=u"off", txf=u"off"):
         """Set Ethernet flow control for specified interfaces.
@@ -328,11 +348,13 @@ class InterfaceUtil:
             exec_cmd_no_error(node, cmd, sudo=True)
 
     @staticmethod
-    def vpp_set_interface_mtu(node, interface, mtu=9200):
-        """Set Ethernet MTU on interface.
+    def vpp_set_interface_mtu(node, interface, mtu):
+        """Apply new MTU value to a VPP hardware interface.
+
+        The interface should be down when this is called.
 
         :param node: VPP node.
-        :param interface: Interface to setup MTU. Default: 9200.
+        :param interface: Interface to set MTU on.
         :param mtu: Ethernet MTU size in Bytes.
         :type node: dict
         :type interface: str or int
@@ -342,43 +364,11 @@ class InterfaceUtil:
             sw_if_index = Topology.get_interface_sw_index(node, interface)
         else:
             sw_if_index = interface
-
         cmd = u"hw_interface_set_mtu"
         err_msg = f"Failed to set interface MTU on host {node[u'host']}"
-        args = dict(
-            sw_if_index=sw_if_index,
-            mtu=int(mtu)
-        )
-        try:
-            with PapiSocketExecutor(node) as papi_exec:
-                papi_exec.add(cmd, **args).get_reply(err_msg)
-        except AssertionError as err:
-            logger.debug(f"Setting MTU failed.\n{err}")
-
-    @staticmethod
-    def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
-        """Set Ethernet MTU on all interfaces.
-
-        :param node: VPP node.
-        :param mtu: Ethernet MTU size in Bytes. Default: 9200.
-        :type node: dict
-        :type mtu: int
-        """
-        for interface in node[u"interfaces"]:
-            InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
-
-    @staticmethod
-    def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
-        """Set Ethernet MTU on all interfaces on all DUTs.
-
-        :param nodes: VPP nodes.
-        :param mtu: Ethernet MTU size in Bytes. Default: 9200.
-        :type nodes: dict
-        :type mtu: int
-        """
-        for node in nodes.values():
-            if node[u"type"] == NodeType.DUT:
-                InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
+        args = dict(sw_if_index=sw_if_index, mtu=int(mtu))
+        with PapiSocketExecutor(node) as papi_exec:
+            papi_exec.add(cmd, **args).get_reply(err_msg)
 
     @staticmethod
     def vpp_node_interfaces_ready_wait(node, retries=15):
@@ -733,9 +723,8 @@ class InterfaceUtil:
         ret_code, stdout, _ = ssh.exec_command(cmd)
         if int(ret_code) != 0:
             raise RuntimeError(u"Get interface name and MAC failed")
-        tmp = u"{" + stdout.rstrip().replace(u"\n", u",") + u"}"
 
-        interfaces = JsonParser().parse_data(tmp)
+        interfaces = loads("{" + stdout.rstrip().replace("\n", ",") + "}")
         for interface in node[u"interfaces"].values():
             name = interfaces.get(interface[u"mac_address"])
             if name is None:
@@ -849,7 +838,7 @@ class InterfaceUtil:
         :raises RuntimeError: if it is unable to create VxLAN interface on
             the node.
         """
-        cmd = u"vxlan_add_del_tunnel"
+        cmd = u"vxlan_add_del_tunnel_v3"
         args = dict(
             is_add=True,
             instance=Constants.BITWISE_NON_ZERO,
@@ -903,7 +892,7 @@ class InterfaceUtil:
         err_msg = f"Failed to set VXLAN bypass on interface " \
            f"on host {node[u'host']}"
         with PapiSocketExecutor(node) as papi_exec:
-            papi_exec.add(cmd, **args).get_replies(err_msg)
+            papi_exec.add(cmd, **args).get_reply(err_msg)
 
     @staticmethod
     def vxlan_dump(node, interface=None):
@@ -1077,7 +1066,7 @@ class InterfaceUtil:
         :raises RuntimeError: if it is unable to create GTPU interface on
             the node.
         """
-        cmd = u"gtpu_add_del_tunnel"
+        cmd = u"gtpu_add_del_tunnel_v2"
         args = dict(
             is_add=True,
             src_address=IPAddress.create_ip_address_object(
@@ -1088,8 +1077,10 @@
             ),
             mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
             encap_vrf_id=0,
-            decap_next_index=2,
-            teid=teid
+            decap_next_index=2,  # ipv4
+            teid=teid,
+            # pdu_extension: Unused, false by default.
+            # qfi: Irrelevant when pdu_extension is not used.
         )
         err_msg = f"Failed to create GTPU tunnel interface " \
            f"on host {node[u'host']}"
@@ -1103,6 +1094,31 @@
 
         return sw_if_index
 
+    @staticmethod
+    def vpp_enable_gtpu_offload_rx(node, interface, gtpu_if_index):
+        """Enable GTPU offload RX onto interface.
+
+        :param node: Node to run command on.
+        :param interface: Name of the specific interface.
+        :param gtpu_if_index: Index of GTPU tunnel interface.
+
+        :type node: dict
+        :type interface: str
+        :type gtpu_if_index: int
+        """
+        sw_if_index = Topology.get_interface_sw_index(node, interface)
+
+        cmd = u"gtpu_offload_rx"
+        args = dict(
+            hw_if_index=sw_if_index,
+            sw_if_index=gtpu_if_index,
+            enable=True
+        )
+
+        err_msg = f"Failed to enable GTPU offload RX on host {node[u'host']}"
+        with PapiSocketExecutor(node) as papi_exec:
+            papi_exec.add(cmd, **args).get_reply(err_msg)
+
     @staticmethod
     def vpp_create_loopback(node, mac=None):
         """Create loopback interface on VPP node.
@@ -1258,8 +1274,19 @@
             txq_size=txq_size
         )
         err_msg = f"Failed to create AVF interface on host {node[u'host']}"
-        with PapiSocketExecutor(node) as papi_exec:
-            sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+        # FIXME: Remove once the fw/driver is upgraded.
+        for _ in range(10):
+            with PapiSocketExecutor(node) as papi_exec:
+                try:
+                    sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(
+                        err_msg
+                    )
+                    break
+                except AssertionError:
+                    logger.error(err_msg)
+        else:
+            raise AssertionError(err_msg)
 
         InterfaceUtil.add_eth_interface(
             node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
@@ -1296,7 +1323,7 @@
                 node, u"set logging class af_xdp level debug"
             )
 
-        cmd = u"af_xdp_create"
+        cmd = u"af_xdp_create_v3"
         pci_addr = Topology.get_interface_pci_addr(node, if_key)
         args = dict(
             name=InterfaceUtil.pci_to_eth(node, pci_addr),
@@ -1348,7 +1375,7 @@
                 node, u"set logging class rdma level debug"
            )
 
-        cmd = u"rdma_create_v2"
+        cmd = u"rdma_create_v4"
         pci_addr = Topology.get_interface_pci_addr(node, if_key)
         args = dict(
             name=InterfaceUtil.pci_to_eth(node, pci_addr),
@@ -1360,6 +1387,9 @@
             # Note: Set True for non-jumbo packets.
             no_multi_seg=False,
             max_pktlen=0,
+            # TODO: Apply desired RSS flags.
+            # rss4 kept 0 (auto) as API default.
+            # rss6 kept 0 (auto) as API default.
         )
         err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
         with PapiSocketExecutor(node) as papi_exec:
@@ -1771,14 +1801,19 @@
             )
         elif driver == u"af_xdp":
             if kernel_driver not in (
-                    u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core"):
+                    u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core",
+                    u"ixgbe"):
                 raise RuntimeError(
-                    f"AF_XDP needs ice or i40e or rdma compatible driver, not "
+                    f"AF_XDP needs ice/i40e/rdma/ixgbe compatible driver, not "
                     f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
                 )
             vf_keys = InterfaceUtil.init_generic_interface(
                 node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
             )
+        elif driver == u"rdma-core":
+            vf_keys = InterfaceUtil.init_generic_interface(
+                node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
+            )
         return vf_keys
 
     @staticmethod
@@ -1812,13 +1847,14 @@
         # PCI device must be re-bound to kernel driver before creating VFs.
         DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
         # Stop VPP to prevent deadlock.
-        # Unbind from current driver.
-        DUTSetup.pci_driver_unbind(node, pf_pci_addr)
+        # Unbind from current driver if bound.
+        if current_driver:
+            DUTSetup.pci_driver_unbind(node, pf_pci_addr)
         # Bind to kernel driver.
         DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
         # Initialize PCI VFs.
-        DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)
+        DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs=numvfs)
 
         if not numvfs:
             if osi_layer == u"L2":
@@ -1847,12 +1883,20 @@
                 node, pf_dev, state=u"up"
             )
 
-            DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
-            DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
+            vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
+            current_driver = DUTSetup.get_pci_dev_driver(
+                node, vf_pci_addr.replace(":", r"\:")
+            )
+            if current_driver:
+                DUTSetup.pci_vf_driver_unbind(
+                    node, pf_pci_addr, vf_id
+                )
+            DUTSetup.pci_vf_driver_bind(
+                node, pf_pci_addr, vf_id, uio_driver
+            )
 
             # Add newly created ports into topology file
             vf_ifc_name = f"{ifc_key}_vif"
-            vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
             vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
             Topology.update_interface_name(
                 node, vf_ifc_key, vf_ifc_name+str(vf_id+1)
             )
@@ -1949,7 +1993,7 @@
         thread_data = VPPUtil.vpp_show_threads(node)
         worker_cnt = len(thread_data) - 1
         if not worker_cnt:
-            return None
+            return
         worker_ids = list()
         if workers:
             for item in thread_data:
@@ -1973,7 +2017,7 @@
 
     @staticmethod
     def vpp_round_robin_rx_placement_on_all_duts(
-            nodes, prefix, workers=None):
+            nodes, prefix, use_dp_cores=False):
         """Set Round Robin interface RX placement on worker threads
         on all DUTs.
 
@@ -1984,14 +2028,18 @@
 
         :param nodes: Topology nodes.
         :param prefix: Interface name prefix.
-        :param workers: Comma separated worker index numbers intended for
-            dataplane work.
+        :param use_dp_cores: Limit to dataplane cores.
         :type nodes: dict
         :type prefix: str
-        :type workers: str
+        :type use_dp_cores: bool
         """
-        for node in nodes.values():
-            if node[u"type"] == NodeType.DUT:
+        for node_name, node in nodes.items():
+            if node["type"] == NodeType.DUT:
+                workers = None
+                if use_dp_cores:
+                    workers = BuiltIn().get_variable_value(
+                        f"${{{node_name}_cpu_dp}}"
+                    )
                 InterfaceUtil.vpp_round_robin_rx_placement(
                     node, prefix, workers
                 )
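
Reviewer note: below is a minimal usage sketch for two of the reworked helpers in this patch. It is not part of the diff itself; the topology dict ("nodes"), the "DUT1"/"port1" keys and the 9200 MTU value are assumed placeholders for illustration only.

# Illustrative sketch only; not part of the patch above.
from resources.libraries.python.InterfaceUtil import InterfaceUtil
from resources.libraries.python.topology import Topology


def reset_dut_port(nodes, dut_key=u"DUT1", if_key=u"port1", mtu=9200):
    """Detach stale XDP programs and set an explicit MTU on one DUT port."""
    dut = nodes[dut_key]
    pci = Topology.get_interface_pci_addr(dut, if_key)
    # New helper from this change: drop any XDP/BPF program left behind
    # by a previous AF_XDP test run.
    InterfaceUtil.set_interface_xdp_off(dut, [pci])
    # vpp_set_interface_mtu() no longer defaults to 9200, so the caller
    # passes the MTU explicitly (the interface should be admin down).
    InterfaceUtil.vpp_set_interface_mtu(dut, if_key, mtu=mtu)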